diff --git a/.gitignore b/.gitignore index c84b2fc8c79d6e2c9c83e2b830ab176295846fd0..9622ab78e0e0556ec2b4cc974fee93ff680d54d2 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,5 @@ cmake-build-* python/paddle/v2/framework/core.so CMakeFiles cmake_install.cmake - +paddle/.timestamp +python/paddlepaddle.egg-info/ diff --git a/.travis.yml b/.travis.yml index 376c693602b56fe719decfeb41c217497e143e12..8c8c6699d3d9abddd65a3a224c2bceedc7d88348 100644 --- a/.travis.yml +++ b/.travis.yml @@ -38,7 +38,7 @@ before_install: # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python # protobuf version. - pip install numpy wheel 'protobuf==3.1' sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit requests==2.9.2 LinkChecker - - pip install rarfile + - pip install rarfile nltk==3.2.2 scipy==0.19.0 recordio matplotlib Pillow - curl https://glide.sh/get | bash - eval "$(GIMME_GO_VERSION=1.8.3 gimme)" - go get -u github.com/alecthomas/gometalinter diff --git a/Dockerfile b/Dockerfile index 156ad3552b2c4ff90b405c35c66d44117c2624a4..8ac123bf9c0f24b47b741611f3b80213c61b82e9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update && \ wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \ curl sed grep graphviz libjpeg-dev zlib1g-dev \ python-matplotlib gcc-4.8 g++-4.8 \ - automake locales clang-format-3.8 swig doxygen cmake \ + automake locales clang-format swig doxygen cmake \ liblapack-dev liblapacke-dev libboost-dev \ clang-3.8 llvm-3.8 libclang-3.8-dev \ net-tools && \ @@ -38,17 +38,16 @@ RUN apt-get update && \ RUN pip --no-cache-dir install 'numpy>=1.12.0' # Install Go and glide -RUN wget -O go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz && \ - tar -C /usr/local -xzf go.tgz && \ +RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \ + tar -xz -C /usr/local && \ mkdir /root/gopath && \ mkdir /root/gopath/bin && \ - mkdir /root/gopath/src && \ - rm go.tgz + mkdir /root/gopath/src ENV GOROOT=/usr/local/go GOPATH=/root/gopath # should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT. ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin # install glide -RUN curl -q https://glide.sh/get | sh +RUN curl -s -q https://glide.sh/get | sh # git credential to skip password typing RUN git config --global credential.helper store diff --git a/cmake/external/any.cmake b/cmake/external/any.cmake index 5d2f7219b2007493916a39e839d647a9d0046c9f..85cce80b70a1fcf57015ac7a264e4950616b2717 100644 --- a/cmake/external/any.cmake +++ b/cmake/external/any.cmake @@ -8,7 +8,7 @@ ExternalProject_Add( extern_lib_any ${EXTERNAL_PROJECT_LOG_ARGS} GIT_REPOSITORY "https://github.com/PaddlePaddle/any.git" - GIT_TAG "8fef1e93710a0edf8d7658999e284a1142c4c020" + GIT_TAG "15595d8324be9e8a9a80d9ae442fdd12bd66df5d" PREFIX ${ANY_SOURCE_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index 17a1ca4ed04dce85ae3c7fdd5f22d6eeed03db59..e9fd3d4bedc983ae7c544cf289dc841cf22f9de4 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -17,7 +17,7 @@ IF(NOT ${WITH_MKLML}) ENDIF(NOT ${WITH_MKLML}) IF(WIN32 OR APPLE) - MESSAGE(WARNING + MESSAGE(WARNING "Windows or Mac is not supported with MKLML in Paddle yet." 
"Force WITH_MKLML=OFF") SET(WITH_MKLML OFF CACHE STRING "Disable MKLML package in Windows and MacOS" FORCE) @@ -43,22 +43,21 @@ SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${MKLML_ROOT}/lib") INCLUDE_DIRECTORIES(${MKLML_INC_DIR}) -SET(mklml_cmakefile ${MKLML_DOWNLOAD_DIR}/CMakeLists.txt) -FILE(WRITE ${mklml_cmakefile} "PROJECT(MKLML)\n" - "cmake_minimum_required(VERSION 3.0)\n" - "install(DIRECTORY ${MKLML_VER}\n" - " DESTINATION ${MKLML_DST_DIR})\n") +FILE(WRITE ${MKLML_DOWNLOAD_DIR}/CMakeLists.txt + "PROJECT(MKLML)\n" + "cmake_minimum_required(VERSION 3.0)\n" + "install(DIRECTORY ${MKLML_VER}\n" + " DESTINATION ${MKLML_DST_DIR})\n") ExternalProject_Add( ${MKLML_PROJECT} ${EXTERNAL_PROJECT_LOG_ARGS} PREFIX ${MKLML_SOURCE_DIR} DOWNLOAD_DIR ${MKLML_DOWNLOAD_DIR} - DOWNLOAD_COMMAND wget --no-check-certificate -O ${MKLML_DOWNLOAD_DIR}/${MKLML_VER}.tgz ${MKLML_URL} - && tar -xzf ${MKLML_DOWNLOAD_DIR}/${MKLML_VER}.tgz + DOWNLOAD_COMMAND wget --no-check-certificate -qO- ${MKLML_URL} | tar xz -C ${MKLML_DOWNLOAD_DIR} DOWNLOAD_NO_PROGRESS 1 UPDATE_COMMAND "" - CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLML_INSTALL_ROOT} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLML_INSTALL_ROOT} CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${MKLML_INSTALL_ROOT} ) diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst index 372272a53c12c314fc80eebbce5eae9fcabc55ba..cb330ea5e1b914587a725c9b90a33053f3fbbc3d 100644 --- a/doc/api/v2/config/layer.rst +++ b/doc/api/v2/config/layer.rst @@ -257,6 +257,16 @@ seq_concat .. autoclass:: paddle.v2.layer.seq_concat :noindex: +kmax_sequence_score +------------------- +.. autoclass:: paddle.v2.layer.kmax_sequence_score + :noindex: + +sub_nested_seq +-------------- +.. autoclass:: paddle.v2.layer.sub_nested_seq + :noindex: + Reshaping Layers ================ diff --git a/doc/design/releasing_process.md b/doc/design/releasing_process.md index 3692a5248a355cfcfd1cfd0911d43d65166921b1..0c10e782808ca6456347ec54cb5e921162731ede 100644 --- a/doc/design/releasing_process.md +++ b/doc/design/releasing_process.md @@ -11,6 +11,15 @@ Paddle每次发新的版本,遵循以下流程: * 编译这个版本的Ubuntu Deb包。如果失败,修复Ubuntu Deb包编译问题,Patch号加一,返回第二步。 * 使用Regression Test List作为检查列表,测试Docker镜像/ubuntu安装包的功能正确性 * 如果失败,记录下所有失败的例子,在这个`release/版本号`分支中,修复所有bug后,Patch号加一,返回第二步 + * 编译这个版本的python wheel包,并发布到pypi。 + * 由于pypi.python.org目前遵循[严格的命名规范PEP 513](https://www.python.org/dev/peps/pep-0513),在使用twine上传之前,需要重命名wheel包中platform相关的后缀,比如将`linux_x86_64`修改成`manylinux1_x86_64`。 + * pypi上的package名称为paddlepaddle和paddlepaddle_gpu,如果要上传GPU版本的包,需要修改build/python/setup.py中,name: "paddlepaddle_gpu"并重新打包wheel包:`python setup.py bdist_wheel`。 + * 上传方法: + ``` + cd build/python + pip install twine + twine upload dist/[package to upload] + ``` 4. 第三步完成后,将`release/版本号`分支合入master分支,并删除`release/版本号`分支。将master分支的合入commit打上tag,tag为`版本号`。同时再将`master`分支合入`develop`分支。最后删除`release/版本号`分支。 5. 编译master分支的Docker发行镜像,发布到dockerhub。编译ubuntu的deb包,发布到github release页面 6. 
协同完成Release Note的书写 diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst index 87c286a1af75e08313813f1373ea03b85d4af523..02b96bb413156786db6dc77696c5640b97c10aa4 100644 --- a/doc/getstarted/build_and_install/docker_install_cn.rst +++ b/doc/getstarted/build_and_install/docker_install_cn.rst @@ -3,6 +3,43 @@ PaddlePaddle的Docker容器使用方式 PaddlePaddle目前唯一官方支持的运行的方式是Docker容器。因为Docker能在所有主要操作系统(包括Linux,Mac OS X和Windows)上运行。 请注意,您需要更改 `Dockers设置 `_ 才能充分利用Mac OS X和Windows上的硬件资源。 +Docker使用入门 +------------------------------ + +几个基础的概念帮助理解和使用Docker: + +- *镜像*:一个Docker镜像是一个打包好的软件。它包含了这个软件本身和它所依赖的运行环境。PaddlePaddle的Docker镜像就包含了PaddlePaddle的Python库以及其依赖的多个Python库。这样我们可以直接在Docker中运行需要的程序而不需要安装后在执行。可以执行: + + .. code-block:: bash + + docker images + + 来列出当前系统中的所有镜像,同样可以执行: + + .. code-block:: bash + + docker pull paddlepaddle/paddle:0.10.0 + + 来下载Docker镜像,paddlepaddle/paddle是从官方镜像源Dockerhub.com下载的,推荐国内用户使用ocker.paddlepaddle.org/paddle下载。 + +- *容器*: 如果说一个Docker镜像就是一个程序,那容器就是这个程序运行时产生的“进程”。 + 实际上,一个容器就是一个操作系统的进程,但是是运行在独立的进程空间,文件系统以及网络之上。 + 可以执行: + + .. code-block:: bash + + docker run paddlepaddle/paddle:0.10.0 + + 来使用一个镜像启动一个容器。 + +- 默认情况下,Docker容器会运行在独立的文件系统空间之上,我们无法在Docker容器中 + 访问到主机上的文件。可以通过*挂载Volume*的方式,将主机上的文件或目录挂载到 + Docker容器中。下面的命令把当前目录挂载到了容器中的 /data 目录下,容器使用 + debian镜像,并且启动后执行 :code:`ls /data`。 + + .. code-block:: bash + + docker run --rm -v $(pwd):/data debian ls /data PaddlePaddle发布的Docker镜像使用说明 ------------------------------ @@ -12,11 +49,11 @@ PaddlePaddle需要的所有编译工具。把编译出来的PaddlePaddle也打 像,称为生产镜像,里面涵盖了PaddlePaddle运行所需的所有环境。每次 PaddlePaddle发布新版本的时候都会发布对应版本的生产镜像以及开发镜像。运 行镜像包括纯CPU版本和GPU版本以及其对应的非AVX版本。我们会在 -`dockerhub.com `_ 提供最新 -的Docker镜像,可以在"tags"标签下找到最新的Paddle镜像版本。为了方便在国 -内的开发者下载Docker镜像,我们提供了国内的镜像服务器供大家使用。如果您 -在国内,请把文档里命令中的paddlepaddle/paddle替换成 -docker.paddlepaddle.org/paddle。 +`dockerhub.com `_ +和国内镜像`docker.paddlepaddle.org` 提供最新 +的Docker镜像,可以在"tags"标签下找到最新的Paddle镜像版本。 + +**注意:为了方便在国内的开发者下载Docker镜像,我们提供了国内的镜像服务器供大家使用。如果您在国内,请把文档里命令中的paddlepaddle/paddle替换成docker.paddlepaddle.org/paddle。** 1. 开发镜像::code:`paddlepaddle/paddle:0.10.0-dev` @@ -68,6 +105,8 @@ docker.paddlepaddle.org/paddle。 如果输出是No,就需要选择使用no-AVX的镜像 + **注:在0.10.0之后的版本,PaddlePaddle都可以自动判断硬件是否支持AVX,所以无需判断AVX即可使用** + 以上方法在GPU镜像里也能用,只是请不要忘记提前在物理机上安装GPU最新驱动。 为了保证GPU驱动能够在镜像里面正常运行,我们推荐使用[nvidia-docker](https://github.com/NVIDIA/nvidia-docker)来运行镜像。 diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/getstarted/build_and_install/docker_install_en.rst index b6fd3329b273aabe80edd5f1ff064a311648b3c2..94860240f6a4a9bed8a865684a8a79960489280e 100644 --- a/doc/getstarted/build_and_install/docker_install_en.rst +++ b/doc/getstarted/build_and_install/docker_install_en.rst @@ -63,12 +63,35 @@ CPU-only version and a CUDA GPU version and their no-AVX versions. We put the docker images on `dockerhub.com `_. You can find the -latest versions under "tags" tab at dockerhub.com. If you are in -China, you can use our Docker image registry mirror to speed up the -download process. To use it, please replace all paddlepaddle/paddle in -the commands to docker.paddlepaddle.org/paddle. +latest versions under "tags" tab at dockerhub.com. -1. Production images, this image might have multiple variants: +** NOTE: If you are in China, you can use our Docker image registry mirror to speed up the download process. To use it, please replace all paddlepaddle/paddle in the commands to docker.paddlepaddle.org/paddle.** + + +1. 
development image :code:`paddlepaddle/paddle:-dev` + + This image has packed related develop tools and runtime + environment. Users and developers can use this image instead of + their own local computer to accomplish development, build, + releasing, document writing etc. While different version of paddle + may depends on different version of libraries and tools, if you + want to setup a local environment, you must pay attention to the + versions. The development image contains: + + - gcc/clang + - nvcc + - Python + - sphinx + - woboq + - sshd + + Many developers use servers with GPUs, they can use ssh to login to + the server and run :code:`docker exec` to enter the docker + container and start their work. Also they can start a development + docker image with SSHD service, so they can login to the container + and start work. + +2. Production images, this image might have multiple variants: - GPU/AVX::code:`paddlepaddle/paddle:-gpu` - GPU/no-AVX::code:`paddlepaddle/paddle:-gpu-noavx` @@ -84,7 +107,7 @@ the commands to docker.paddlepaddle.org/paddle. if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi - + **NOTE:versions after 0.10.0 will automatically detect system AVX support, so manual detect is not needed in this case.** To run the CPU-only image as an interactive container: .. code-block:: bash @@ -103,29 +126,6 @@ the commands to docker.paddlepaddle.org/paddle. nvidia-docker run -it --rm paddlepaddle/paddle:0.10.0-gpu /bin/bash -2. development image :code:`paddlepaddle/paddle:-dev` - - This image has packed related develop tools and runtime - environment. Users and developers can use this image instead of - their own local computer to accomplish development, build, - releasing, document writing etc. While different version of paddle - may depends on different version of libraries and tools, if you - want to setup a local environment, you must pay attention to the - versions. The development image contains: - - - gcc/clang - - nvcc - - Python - - sphinx - - woboq - - sshd - - Many developers use servers with GPUs, they can use ssh to login to - the server and run :code:`docker exec` to enter the docker - container and start their work. Also they can start a development - docker image with SSHD service, so they can login to the container - and start work. - Train Model Using Python API ---------------------------- diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in index 95cad835b11816f4d2e256c2abd662a545a5bad2..673948dfe7928240817b552141ec9bc2f8a672b7 100644 --- a/doc/templates/conf.py.cn.in +++ b/doc/templates/conf.py.cn.in @@ -13,15 +13,11 @@ # serve to show the default. import sys import os, subprocess +sys.path.insert(0, os.path.abspath('@PROJ_ROOT@/python')) import shlex from recommonmark import parser, transform -try: - import py_paddle - import paddle - import paddle.v2 -except ImportError: - print("Must install paddle python package before generating documentation") - sys.exit(1) +import paddle +import paddle.v2 MarkdownParser = parser.CommonMarkParser AutoStructify = transform.AutoStructify diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in index b477f0120c4fa0544012080b7cfb8572d3c44b04..b6b50b7dcd5647b50a13703160489323ed90a1b4 100644 --- a/doc/templates/conf.py.en.in +++ b/doc/templates/conf.py.en.in @@ -13,15 +13,11 @@ # serve to show the default. 
import sys import os, subprocess +sys.path.insert(0, os.path.abspath('@PROJ_ROOT@/python')) import shlex from recommonmark import parser, transform -try: - import py_paddle - import paddle - import paddle.v2 -except ImportError: - print("Must install paddle python package before generating documentation") - sys.exit(1) +import paddle +import paddle.v2 MarkdownParser = parser.CommonMarkParser diff --git a/go/cmd/pserver/pserver.go b/go/cmd/pserver/pserver.go index f9cd8f87e8f2e715c87834ee08482be0f511f681..bec5775d540729000ab2dd3002600f0a92619d70 100644 --- a/go/cmd/pserver/pserver.go +++ b/go/cmd/pserver/pserver.go @@ -32,7 +32,7 @@ import ( func main() { port := flag.Int("port", 0, "port of the pserver") - index := flag.Int("index", -1, "index of this pserver, should be larger or equal than 0") + index := flag.Int("index", -1, "index of the pserver, set to -1 if use etcd for auto pserver index registry") etcdEndpoint := flag.String("etcd-endpoint", "http://127.0.0.1:2379", "comma separated endpoint string for pserver to connect to etcd") dialTimeout := flag.Duration("dial-timeout", 5*time.Second, "dial timeout") @@ -60,12 +60,12 @@ func main() { idx, err = e.Register(*port) candy.Must(err) - cp, err = pserver.NewCheckpointFromFile(*checkpointPath, idx, e) + cp, err = pserver.LoadCheckpoint(e, idx) if err != nil { if err == pserver.ErrCheckpointNotFound { log.Infof("Could not find the pserver checkpoint.") } else { - log.Errorf("Fetch checkpoint failed, %s", err) + panic(err) } } } diff --git a/go/glide.lock b/go/glide.lock index 1f16abdf66422abcd0ab7987cab3499d02cf1b9c..1ecdd217520e0a62b546b4c7048a25f4316d3f37 100644 --- a/go/glide.lock +++ b/go/glide.lock @@ -1,5 +1,5 @@ -hash: 2a1c0eca5c07a130e3d224f9821f96cfa37a39bf6bce141c855bbc57ef569f1c -updated: 2017-07-29T07:34:48.722757905+08:00 +hash: 1b9b07408ca7fac27a374dc2ccd2433e4bff090484008a037df967284949a582 +updated: 2017-08-07T23:37:48.867469328Z imports: - name: github.com/beorn7/perks version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 @@ -10,7 +10,7 @@ imports: - name: github.com/cockroachdb/cmux version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92 - name: github.com/coreos/etcd - version: c31bec0f29facff13f7c3e3d948e55dd6689ed42 + version: d0d1a87aa96ae14914751d42264262cb69eda170 subpackages: - alarm - auth @@ -24,6 +24,7 @@ imports: - error - etcdserver - etcdserver/api + - etcdserver/api/etcdhttp - etcdserver/api/v2http - etcdserver/api/v2http/httptypes - etcdserver/api/v3client @@ -145,6 +146,8 @@ imports: version: a1dba9ce8baed984a2495b658c82687f8157b98f subpackages: - xfs +- name: github.com/satori/go.uuid + version: 879c5887cd475cd7864858769793b2ceb0d44feb - name: github.com/sirupsen/logrus version: a3f95b5c423586578a4e099b11a46c2479628cac - name: github.com/topicai/candy @@ -208,11 +211,6 @@ testImports: version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 subpackages: - spew -- name: github.com/docker/docker - version: b6d164e6c46d8115b146e4c3ac93784e9ef8b49e - subpackages: - - pkg/ioutils - - pkg/longpath - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/go/glide.yaml b/go/glide.yaml index bc23fa6ebf2c3db61e2d63e5f7e7ddcb595dfed0..a90e71b615de92d64c79823e2a04c46001963932 100644 --- a/go/glide.yaml +++ b/go/glide.yaml @@ -14,11 +14,13 @@ import: version: ^1.0.0 - package: github.com/topicai/candy - package: golang.org/x/crypto - vcs: git repo: https://github.com/golang/crypto.git -- package: golang.org/x/sys vcs: git +- package: golang.org/x/sys repo: 
https://github.com/golang/sys.git -- package: golang.org/x/text vcs: git +- package: golang.org/x/text repo: https://github.com/golang/text.git + vcs: git +- package: github.com/satori/go.uuid + version: v1.1.0 diff --git a/go/master/service.go b/go/master/service.go index d30e9a33229c0aff354417771b5bf2ae6a781715..df7c6860e6ae13a5be7d0425273812208685ee9d 100644 --- a/go/master/service.go +++ b/go/master/service.go @@ -77,11 +77,12 @@ type taskEntry struct { NumFailure int } -type taskQueues struct { +type masterState struct { Todo []taskEntry Pending map[int]taskEntry // map from task ID to task entry Done []taskEntry Failed []taskEntry + CurPass int } // Service is the master server service. @@ -94,11 +95,11 @@ type Service struct { ready chan struct{} initDone bool - mu sync.Mutex - taskQueues taskQueues - currPass int - jobTasks []taskEntry - + mu sync.Mutex + // State to be persisted to snapshot. + state masterState + // The trainer that is currently saving model. This state is + // transient, does not need to be persisted to snapshot. savingTrainer string } @@ -141,8 +142,8 @@ func NewService(store Store, chunksPerTask int, timeoutDur time.Duration, failur s.chunksPerTask = chunksPerTask s.timeoutDur = timeoutDur s.failureMax = failureMax - s.taskQueues = taskQueues{} - s.taskQueues.Pending = make(map[int]taskEntry) + s.state = masterState{} + s.state.Pending = make(map[int]taskEntry) s.ready = make(chan struct{}) s.store = store recovered, err := s.recover() @@ -180,7 +181,7 @@ func (s *Service) recover() (bool, error) { } dec := gob.NewDecoder(gr) - var tqs taskQueues + var tqs masterState err = dec.Decode(&tqs) if err != nil { return false, err @@ -193,7 +194,12 @@ func (s *Service) recover() (bool, error) { log.Errorln(err) } - s.taskQueues = tqs + s.state = tqs + log.WithFields(s.logFields()).Infof("Master recovered from snapshot, scheduling pending task timeout check.") + for _, t := range s.state.Pending { + time.AfterFunc(s.timeoutDur, s.checkTimeoutFunc(t.Task.Meta.ID, t.Task.Meta.Epoch)) + } + return true, nil } @@ -208,7 +214,7 @@ func (s *Service) snapshot() error { var buf bytes.Buffer gw := gzip.NewWriter(&buf) enc := gob.NewEncoder(gw) - err := enc.Encode(s.taskQueues) + err := enc.Encode(s.state) if err != nil { return err } @@ -290,8 +296,7 @@ func (s *Service) SetDataset(globPaths []string, _ *int) error { return err } - s.jobTasks = partition(chunks, s.chunksPerTask) - s.taskQueues.Todo = s.jobTasks + s.state.Todo = partition(chunks, s.chunksPerTask) err = s.snapshot() if err != nil { @@ -319,17 +324,17 @@ func (s *Service) processFailedTask(t taskEntry, epoch int) { } }() - delete(s.taskQueues.Pending, t.Task.Meta.ID) + delete(s.state.Pending, t.Task.Meta.ID) t.NumFailure++ if t.NumFailure > s.failureMax { log.Warningf("Task %v failed %d times, discard.", t.Task, t.NumFailure) - s.taskQueues.Failed = append(s.taskQueues.Failed, t) + s.state.Failed = append(s.state.Failed, t) return } log.Warningf("Task %v failed %d times, re-dispatch.", t.Task, t.NumFailure) - s.taskQueues.Todo = append(s.taskQueues.Todo, t) + s.state.Todo = append(s.state.Todo, t) return } @@ -338,7 +343,7 @@ func (s *Service) checkTimeoutFunc(taskID int, epoch int) func() { s.mu.Lock() defer s.mu.Unlock() - t, ok := s.taskQueues.Pending[taskID] + t, ok := s.state.Pending[taskID] if !ok { return } @@ -350,10 +355,11 @@ func (s *Service) checkTimeoutFunc(taskID int, epoch int) func() { // must be called with lock held. 
func (s *Service) logFields() log.Fields { return log.Fields{ - "todoLen": len(s.taskQueues.Todo), - "pendingLen": len(s.taskQueues.Pending), - "doneLen": len(s.taskQueues.Done), - "failedLen": len(s.taskQueues.Failed), + "todoLen": len(s.state.Todo), + "pendingLen": len(s.state.Pending), + "doneLen": len(s.state.Done), + "failedLen": len(s.state.Failed), + "curPass": s.state.CurPass, } } @@ -366,17 +372,17 @@ func (s *Service) GetTask(passID int, task *Task) error { s.mu.Lock() defer s.mu.Unlock() - if passID < s.currPass { + if passID < s.state.CurPass { return ErrPassBefore } - if passID > s.currPass { + if passID > s.state.CurPass { // Client may get run to pass after master when one client faster than the // other return ErrPassAfter } - if len(s.taskQueues.Todo) == 0 { - if len(s.taskQueues.Done) == 0 && len(s.taskQueues.Pending) == 0 { + if len(s.state.Todo) == 0 { + if len(s.state.Done) == 0 && len(s.state.Pending) == 0 { log.WithFields(s.logFields()).Warningln("All tasks failed, may start next pass") return ErrAllTaskFailed } @@ -384,10 +390,10 @@ func (s *Service) GetTask(passID int, task *Task) error { return ErrNoMoreAvailable } - t := s.taskQueues.Todo[0] + t := s.state.Todo[0] t.Task.Meta.Epoch++ - s.taskQueues.Todo = s.taskQueues.Todo[1:] - s.taskQueues.Pending[t.Task.Meta.ID] = t + s.state.Todo = s.state.Todo[1:] + s.state.Pending[t.Task.Meta.ID] = t err := s.snapshot() if err != nil { return err @@ -409,7 +415,7 @@ func (s *Service) TaskFinished(taskID int, dummy *int) error { s.mu.Lock() defer s.mu.Unlock() - t, ok := s.taskQueues.Pending[taskID] + t, ok := s.state.Pending[taskID] if !ok { log.WithFields(s.logFields()).Warningln("Pending task #%d not found.", taskID) return nil @@ -417,18 +423,18 @@ func (s *Service) TaskFinished(taskID int, dummy *int) error { // task finished, reset timeout t.NumFailure = 0 - s.taskQueues.Done = append(s.taskQueues.Done, t) - delete(s.taskQueues.Pending, taskID) + s.state.Done = append(s.state.Done, t) + delete(s.state.Pending, taskID) log.WithFields(s.logFields()).Infof("Task #%d finished.", taskID) - if len(s.taskQueues.Todo) == 0 && len(s.taskQueues.Pending) == 0 { + if len(s.state.Todo) == 0 && len(s.state.Pending) == 0 { // increase master side pass count if all tasks finished - s.currPass++ - s.taskQueues.Todo = s.jobTasks - s.taskQueues.Done = []taskEntry{} + s.state.CurPass++ + s.state.Todo = append(s.state.Done, s.state.Failed...) 
+ s.state.Done = []taskEntry{} // TODO(typhoonzero): deal with failed tasks - s.taskQueues.Failed = []taskEntry{} - log.WithFields(s.logFields()).Warningf("all task finished, add new pass data, newpass: %d.", s.currPass) + s.state.Failed = []taskEntry{} + log.WithFields(s.logFields()).Warningf("all task finished, add new pass data, newpass: %d.", s.state.CurPass) } err := s.snapshot() @@ -447,7 +453,7 @@ func (s *Service) TaskFailed(meta TaskMeta, dummy *int) error { s.mu.Lock() defer s.mu.Unlock() - t, ok := s.taskQueues.Pending[meta.ID] + t, ok := s.state.Pending[meta.ID] if !ok { log.WithFields(s.logFields()).Warningln("TaskFailed:Pending task #%v not found.", t.Task.Meta) return nil diff --git a/go/master/service_test.go b/go/master/service_test.go index 5f91910ecc8cf32289e71e2e41e8b283acc115e6..2d00c22d6feb7177da5c19c557fd16d7925ef6d1 100644 --- a/go/master/service_test.go +++ b/go/master/service_test.go @@ -1,24 +1,30 @@ package master_test import ( + "io/ioutil" + "net/url" "os" + "strings" "testing" "time" "github.com/PaddlePaddle/Paddle/go/master" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/embed" - "github.com/docker/docker/pkg/ioutils" "github.com/stretchr/testify/assert" ) func TestNewServiceWithEtcd(t *testing.T) { // setup an embed etcd server - etcdDir, err := ioutils.TempDir("", "") + etcdDir, err := ioutil.TempDir("", "") if err != nil { t.Fatal(err) } cfg := embed.NewConfig() + lpurl, _ := url.Parse("http://localhost:0") + lcurl, _ := url.Parse("http://localhost:0") + cfg.LPUrls = []url.URL{*lpurl} + cfg.LCUrls = []url.URL{*lcurl} cfg.Dir = etcdDir e, err := embed.StartEtcd(cfg) if err != nil { @@ -30,15 +36,13 @@ func TestNewServiceWithEtcd(t *testing.T) { t.Fatal(err) } }() - select { - case <-e.Server.ReadyNotify(): - t.Log("Server is ready!") - case <-time.After(60 * time.Second): - e.Server.Stop() // trigger a shutdown - t.Fatal("Server took too long to start!") - } - ep := []string{"127.0.0.1:2379"} + <-e.Server.ReadyNotify() + + port := strings.Split(e.Clients[0].Addr().String(), ":")[1] + endpoint := "127.0.0.1:" + port + + ep := []string{endpoint} masterAddr := "127.0.0.1:3306" store, err := master.NewEtcdClient(ep, masterAddr, master.DefaultLockPath, master.DefaultAddrPath, master.DefaultStatePath, 30) if err != nil { diff --git a/go/pserver/client/c/cclient.go b/go/pserver/client/c/cclient.go index 14ad0774550f6e5a5d8610d6007904cd2820432c..a49cd01522b8b49a74f21fcb97e9eeb1fbb2d272 100644 --- a/go/pserver/client/c/cclient.go +++ b/go/pserver/client/c/cclient.go @@ -90,8 +90,12 @@ func cArrayToSlice(p unsafe.Pointer, len int) []byte { type selector bool -func (s selector) Select() bool { - return bool(s) +func (s selector) Select() (bool, error) { + return bool(s), nil +} + +func (s selector) Done() error { + return nil } type lister []client.Server @@ -114,11 +118,10 @@ func paddle_new_pserver_client(addrs *C.char, selected int) C.paddle_pserver_cli } //export paddle_new_etcd_pserver_client -func paddle_new_etcd_pserver_client(etcdEndpoints *C.char, selected int) C.paddle_pserver_client { - // TODO(Longfei: use etcd lock to decide which trainer to initialize the parameters) +func paddle_new_etcd_pserver_client(etcdEndpoints *C.char) C.paddle_pserver_client { addr := C.GoString(etcdEndpoints) etcdClient := client.NewEtcd(addr) - c := client.NewClient(etcdClient, etcdClient.Desired(), selector(selected != 0)) + c := client.NewClient(etcdClient, etcdClient.Desired(), etcdClient) return add(c) } @@ -136,7 +139,12 @@ func 
paddle_pserver_client_release(client C.paddle_pserver_client) { //export paddle_begin_init_params func paddle_begin_init_params(client C.paddle_pserver_client) C.int { c := get(client) - if selected := c.BeginInitParams(); selected { + selected, err := c.BeginInitParams() + if err != nil { + panic(err) + } + + if selected { return 1 } return 0 diff --git a/go/pserver/client/client.go b/go/pserver/client/client.go index 15adda4735b022c16cb22715fb690b3740e58b76..20d91e77034e1a0c6825bc401175e6dc1afec52f 100644 --- a/go/pserver/client/client.go +++ b/go/pserver/client/client.go @@ -27,9 +27,13 @@ import ( // TODO(helin): add RPC call retry logic -// Selector selects if the client should initialize parameter servers. +// Selector selects if the client should initialize parameters and +// reports the initialization process done. type Selector interface { - Select() bool + // Select selects if the client should initialize parameter servers. + Select() (bool, error) + // Done indicates the initialization process is done. + Done() error } // Server is the identification of a parameter Server. @@ -115,7 +119,7 @@ func (c *Client) monitorPservers(l Lister, pserverNum int) { // servers. Other trainers will be blocked until the initialization is // done, and they need to get the initialized parameters from // parameter servers using GetParams. -func (c *Client) BeginInitParams() bool { +func (c *Client) BeginInitParams() (bool, error) { return c.sel.Select() } diff --git a/go/pserver/client/client_test.go b/go/pserver/client/client_test.go index b630d434dca283df67f5b850b35057870fe27529..c3d88e926d7cb5f3027be26a270bee6f2db65f31 100644 --- a/go/pserver/client/client_test.go +++ b/go/pserver/client/client_test.go @@ -59,7 +59,7 @@ func initClient() [numPserver]int { go func(l net.Listener) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) if err != nil { panic(err) } @@ -124,8 +124,12 @@ func initEtcdClient() { type selector bool -func (s selector) Select() bool { - return bool(s) +func (s selector) Select() (bool, error) { + return bool(s), nil +} + +func (s selector) Done() error { + return nil } type lister []client.Server @@ -135,7 +139,11 @@ func (l lister) List() []client.Server { } func testClient(t *testing.T, c *client.Client) { - selected := c.BeginInitParams() + selected, err := c.BeginInitParams() + if err != nil { + t.Fatal(err) + } + if !selected { t.Fatal("should be selected.") } diff --git a/go/pserver/client/etcd_client.go b/go/pserver/client/etcd_client.go index b6ff1fec8a6f37f61f38cb5d004b1d2c886473ed..f9071caaa8f5ac32d426b1d4344a30262202b96d 100644 --- a/go/pserver/client/etcd_client.go +++ b/go/pserver/client/etcd_client.go @@ -16,53 +16,60 @@ package client import ( "context" + "errors" + "fmt" "strconv" "strings" "time" "github.com/PaddlePaddle/Paddle/go/pserver" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" log "github.com/sirupsen/logrus" ) const ( defaultEtcdTimeout time.Duration = 5 * time.Second + + initLockPath = "/init_ps/lock" + initDonePath = "/init_ps/done" + initDoneVal = "1" ) -// EtcdClient is used by pserver client that is a part of trainer process. +// Etcd is used by pserver client that is a part of trainer process. // TODO: -// 1. add watcher to watch the change state of pservers) -// 1. add etcd lock) -type EtcdClient struct { +// 1. add watcher to watch the change state of pservers. 
+type Etcd struct { client *clientv3.Client timeout time.Duration endpoints []string + lock *concurrency.Mutex } // Desired read ps desired number from etcd. -func (p *EtcdClient) Desired() int { +func (e *Etcd) Desired() int { var psDesired int for { - ctx, cancel := context.WithTimeout(context.Background(), p.timeout) - resp, err := p.client.Get(ctx, pserver.PsDesired) + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) + resp, err := e.client.Get(ctx, pserver.PsDesired) cancel() if err != nil { log.Errorf("Get ps dresire number failed! recnnectiong..., %v", err) - time.Sleep(p.timeout) + time.Sleep(e.timeout) continue } kvs := resp.Kvs if len(kvs) == 0 { log.Infoln("Waiting for ps desired registered ...") - time.Sleep(p.timeout) + time.Sleep(e.timeout) continue } psDesired, err = strconv.Atoi(string(resp.Kvs[0].Value)) if err != nil { log.Errorf("psDesired %d invalid %v", psDesired, err) - time.Sleep(p.timeout) + time.Sleep(e.timeout) continue } @@ -73,26 +80,26 @@ func (p *EtcdClient) Desired() int { } // List return the pserver list read from etcd. -func (p *EtcdClient) List() []Server { - psDesired := p.Desired() +func (e *Etcd) List() []Server { + psDesired := e.Desired() servers := make([]Server, psDesired) for { for i := 0; i < psDesired; i++ { - ctx, cancel := context.WithTimeout(context.Background(), p.timeout) + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) psKey := pserver.PsPath + strconv.Itoa(i) log.Debugf("checking %s", psKey) - resp, err := p.client.Get(ctx, psKey) + resp, err := e.client.Get(ctx, psKey) cancel() if err != nil { log.Infof("Get psKey= %s error, %v", psKey, err) - time.Sleep(p.timeout) + time.Sleep(e.timeout) continue } kvs := resp.Kvs if len(kvs) == 0 { log.Infof("Waiting for ps addr registered ...") - time.Sleep(p.timeout) + time.Sleep(e.timeout) continue } @@ -100,10 +107,10 @@ func (p *EtcdClient) List() []Server { // TODO(Longfei) check the ps address if psAddr == "" { log.Infof("Get psKey = %s, psAddr is empty", psKey) - time.Sleep(p.timeout) + time.Sleep(e.timeout) continue } - log.Infof("got value (%s) for key: %s", psAddr, psKey) + log.Debugf("got value (%s) for key: %s", psAddr, psKey) servers[i].Index = i servers[i].Addr = psAddr } @@ -113,7 +120,7 @@ func (p *EtcdClient) List() []Server { } // NewEtcd create a etcd client to return the state of pserver on etcd. -func NewEtcd(endpoints string) *EtcdClient { +func NewEtcd(endpoints string) *Etcd { ep := strings.Split(endpoints, ",") var cli *clientv3.Client var err error @@ -130,10 +137,118 @@ func NewEtcd(endpoints string) *EtcdClient { break } log.Infof("Connected to etcd: %s\n", endpoints) - client := &EtcdClient{ + client := &Etcd{ client: cli, timeout: defaultEtcdTimeout, endpoints: ep, } return client } + +// Select indicates if the current trainer is selected to initialize +// the pserver parameters. +func (e *Etcd) Select() (bool, error) { + sess, err := concurrency.NewSession(e.client, concurrency.WithTTL(5)) + if err != nil { + return false, err + } + + lock := concurrency.NewMutex(sess, initLockPath) + log.Infof("Trying to acquire lock at %s.", initLockPath) + // Do not use timeout context here, since we don't know how + // long does it take for other trainers to initialize the + // parameters. 
+ err = lock.Lock(context.Background()) + if err != nil { + return false, err + } + log.Infof("Successfully acquired lock at %s.", initLockPath) + + get := clientv3.OpGet(initDonePath) + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) + tresp, err := e.client.Txn(ctx).If(lock.IsOwner()).Then(get).Commit() + cancel() + if err != nil { + return false, err + } + + if !tresp.Succeeded { + return false, errors.New("no longer the owner of the lock") + } + + resp := tresp.Responses[0].GetResponseRange() + + if len(resp.Kvs) == 0 { + // Key value not set, select current trainer. + e.lock = lock + log.Infoln("Trainer selected.") + return true, nil + } + + if string(resp.Kvs[0].Value) == initDoneVal { + log.Infoln("Initialization is already done.") + ctx, cancel = context.WithTimeout(context.Background(), e.timeout) + err = lock.Unlock(ctx) + cancel() + if err != nil { + log.Errorln(err) + } + return false, nil + } + + return false, fmt.Errorf("key %s have unexpected value: %v", initDonePath, resp.Kvs[0].Value) +} + +// Done indicates the parameter initialization process is done. +func (e *Etcd) Done() error { + if e.lock == nil { + return errors.New("lock is nil, Done called unexpectedly") + } + + put := clientv3.OpPut(initDonePath, initDoneVal) + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) + tresp, err := e.client.Txn(ctx).If(e.lock.IsOwner()).Then(put).Commit() + cancel() + if err != nil { + return err + } + + if !tresp.Succeeded { + return errors.New("no longer the owner of the lock") + } + + ctx, cancel = context.WithTimeout(context.Background(), e.timeout) + err = e.lock.Unlock(ctx) + cancel() + if err != nil { + log.Errorln(err) + } else { + e.lock = nil + } + + return nil +} + +// Close closes the etcd client. 
+func (e *Etcd) Close() error { + var err error + if e.lock != nil { + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) + err = e.lock.Unlock(ctx) + cancel() + if err == nil { + e.lock = nil + } + } + + cErr := e.client.Close() + if cErr != nil { + if err != nil { + log.Errorln(cErr) + return err + } + return cErr + } + + return err +} diff --git a/go/pserver/client/etcd_client_test.go b/go/pserver/client/etcd_client_test.go new file mode 100644 index 0000000000000000000000000000000000000000..08742433e7a266fbd39e34f4b92ac4cc4caeb0fb --- /dev/null +++ b/go/pserver/client/etcd_client_test.go @@ -0,0 +1,106 @@ +package client_test + +import ( + "io/ioutil" + "net/url" + "os" + "strings" + "sync" + "testing" + + "github.com/PaddlePaddle/Paddle/go/pserver/client" + "github.com/coreos/etcd/embed" +) + +func TestSelector(t *testing.T) { + etcdDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + cfg := embed.NewConfig() + lpurl, _ := url.Parse("http://localhost:0") + lcurl, _ := url.Parse("http://localhost:0") + cfg.LPUrls = []url.URL{*lpurl} + cfg.LCUrls = []url.URL{*lcurl} + cfg.Dir = etcdDir + e, err := embed.StartEtcd(cfg) + if err != nil { + t.Fatal(err) + } + + defer func() { + e.Close() + if err := os.RemoveAll(etcdDir); err != nil { + t.Fatal(err) + } + }() + + <-e.Server.ReadyNotify() + + port := strings.Split(e.Clients[0].Addr().String(), ":")[1] + endpoint := "127.0.0.1:" + port + + var mu sync.Mutex + selectedCount := 0 + var wg sync.WaitGroup + selectAndDone := func(c *client.Etcd) { + defer wg.Done() + + selected, err := c.Select() + if err != nil { + panic(err) + } + + if selected { + mu.Lock() + selectedCount++ + mu.Unlock() + err = c.Done() + if err != nil { + t.Fatal(err) + } + } + } + + c0 := client.NewEtcd(endpoint) + c1 := client.NewEtcd(endpoint) + c2 := client.NewEtcd(endpoint) + c3 := client.NewEtcd(endpoint) + wg.Add(3) + go selectAndDone(c0) + go selectAndDone(c1) + go selectAndDone(c2) + wg.Wait() + + // simulate trainer crashed and restarted after the + // initialization process. 
+ wg.Add(1) + go selectAndDone(c3) + wg.Wait() + + mu.Lock() + if selectedCount != 1 { + t.Fatal("selected count wrong:", selectedCount) + } + mu.Unlock() + + err = c0.Close() + if err != nil { + t.Fatal(err) + } + + err = c1.Close() + if err != nil { + t.Fatal(err) + } + + err = c2.Close() + if err != nil { + t.Fatal(err) + } + + err = c3.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/go/pserver/etcd_client.go b/go/pserver/etcd_client.go index 4fb26307667295ab825d07be6c3d1d4b33f6eb8b..41f0640fc09a3265c0e11c06255c7ee834983203 100644 --- a/go/pserver/etcd_client.go +++ b/go/pserver/etcd_client.go @@ -206,6 +206,7 @@ func (e *EtcdClient) GetKey(key string, timeout time.Duration) ([]byte, error) { if err != nil { return []byte{}, err } + kvs := resp.Kvs if len(kvs) == 0 { return []byte{}, nil @@ -215,9 +216,14 @@ func (e *EtcdClient) GetKey(key string, timeout time.Duration) ([]byte, error) { } // PutKey put into etcd with value by key specified -func (e *EtcdClient) PutKey(key string, value []byte, timeout time.Duration) error { +func (e *EtcdClient) PutKey(key string, value []byte, timeout time.Duration, withLease bool) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) - _, err := e.client.Put(ctx, key, string(value), clientv3.WithLease(e.sess.Lease())) + var err error + if withLease { + _, err = e.client.Put(ctx, key, string(value), clientv3.WithLease(e.sess.Lease())) + } else { + _, err = e.client.Put(ctx, key, string(value)) + } cancel() return err } diff --git a/go/pserver/optimizer.go b/go/pserver/optimizer.go index 709160d45d98b6cf6d60f52ceb3fb33e0a0bd17d..ae7359073494bd9cb6b70b12af4daca064179556 100644 --- a/go/pserver/optimizer.go +++ b/go/pserver/optimizer.go @@ -32,6 +32,7 @@ type optimizer struct { opt *C.struct_paddle_optimizer elementType ElementType contentLen int + config []byte } func cArrayToSlice(p unsafe.Pointer, len int) []byte { @@ -70,6 +71,7 @@ func newOptimizer(paramWithConfigs ParameterWithConfig, State []byte) *optimizer cstate = unsafe.Pointer(&s[0]) } + o.config = c o.opt = C.paddle_create_optimizer((*C.uchar)(&c[0]), C.int(len(c)), C.paddle_element_type(p.ElementType), cbuffer, C.int(paramBufferSize), (*C.char)(cstate), C.int(len(s))) return o diff --git a/go/pserver/service.go b/go/pserver/service.go index 7d297c46d03bf78d18ca9830a318968397119d3e..25751540a9a2dff043c14e0912bfab1aaa938ab4 100644 --- a/go/pserver/service.go +++ b/go/pserver/service.go @@ -25,11 +25,13 @@ import ( "fmt" "io/ioutil" "os" - "path/filepath" + "path" "strconv" "sync" "time" + uuid "github.com/satori/go.uuid" + log "github.com/sirupsen/logrus" ) @@ -42,9 +44,9 @@ var ErrCheckpointNotFound = errors.New("checkpoint not found") // RPC error message. const ( - AlreadyInitialized = "pserver already initialized" - Uninitialized = "pserver not fully initialized" - CheckpointMD5Failed = "checkpoint file MD5 validation failed" + AlreadyInitialized = "pserver already initialized" + Uninitialized = "pserver not fully initialized" + WrongChecksum = "checkpoint file checksum validation failed" ) // Supported element types. @@ -73,11 +75,12 @@ type ParameterWithConfig struct { // checkpointMeta saves checkpoint metadata type checkpointMeta struct { UUID string `json:"uuid"` + Path string `json:"path"` MD5 string `json:"md5"` Timestamp int64 `json:"timestamp"` } -// Checkpoint is the pserver shard persist in file +// Checkpoint is the pserver shard persist in file. type Checkpoint []parameterCheckpoint // Gradient is the gradient of the parameter. 
@@ -90,50 +93,58 @@ type Service struct { checkpointInterval time.Duration checkpointPath string client *EtcdClient - mu sync.Mutex - optMap map[string]*optimizer + + mu sync.Mutex + optMap map[string]*optimizer } -// parameterCheckpoint saves parameter checkpoint +// parameterCheckpoint saves parameter checkpoint. type parameterCheckpoint struct { ParameterWithConfig State []byte } -// NewCheckpointFromFile loads parameters and state from checkpoint file -func NewCheckpointFromFile(cpPath string, idx int, e *EtcdClient) (Checkpoint, error) { - v, err := e.GetKey(PsPath+string(idx), 3*time.Second) +func loadMeta(e *EtcdClient, idx int) (meta checkpointMeta, err error) { + v, err := e.GetKey(PsCheckpoint+strconv.Itoa(idx), 3*time.Second) if err != nil { - return nil, err + return } if len(v) == 0 { - return nil, ErrCheckpointNotFound + err = ErrCheckpointNotFound + return } - var cpMeta checkpointMeta - if err = json.Unmarshal(v, &cpMeta); err != nil { - return nil, err + if err = json.Unmarshal(v, &meta); err != nil { + return } - fn := filepath.Join(cpPath, cpMeta.UUID) - if _, err = os.Stat(fn); os.IsNotExist(err) { + return +} + +// LoadCheckpoint loads checkpoint from file. +func LoadCheckpoint(e *EtcdClient, idx int) (Checkpoint, error) { + cpMeta, err := loadMeta(e, idx) + if err != nil { return nil, err } - content, err := ioutil.ReadFile(fn) + + content, err := ioutil.ReadFile(cpMeta.Path) if err != nil { return nil, err } + // TODO(helin): change MD5 to CRC since CRC is better for file + // checksum in our use case (emphasize speed over security). h := md5.New() md5 := hex.EncodeToString(h.Sum(content)) if md5 != cpMeta.MD5 { - return nil, errors.New(CheckpointMD5Failed) + return nil, errors.New(WrongChecksum) } dec := gob.NewDecoder(bytes.NewReader(content)) - cp := Checkpoint{} - if err = dec.Decode(cp); err != nil { + var cp Checkpoint + if err = dec.Decode(&cp); err != nil { return nil, err } return cp, nil @@ -193,6 +204,15 @@ func (s *Service) FinishInitParams(_ int, _ *int) error { } close(s.initialized) + go func() { + t := time.Tick(s.checkpointInterval) + for range t { + err := s.checkpoint() + if err != nil { + log.Errorln(err) + } + } + }() return nil } @@ -240,23 +260,36 @@ func (s *Service) GetParam(name string, parameter *Parameter) error { return nil } -// pserver save checkpoint -func (s *Service) doCheckpoint() (err error) { - <-s.initialized - s.mu.Lock() - defer s.mu.Unlock() +func traceTime(start time.Time, name string) { + elapsed := time.Since(start) + log.Infof("%s took %v", name, elapsed) +} + +// checkpoint saves checkpoint to disk. +// +// checkpoint should be only called after the parameters are +// initialized. +func (s *Service) checkpoint() (err error) { + log.Infoln("Begin save checkpoint.") + defer traceTime(time.Now(), "save checkpoint") + s.mu.Lock() cp := make([]parameterCheckpoint, len(s.optMap)) index := 0 + // TODO(helin): write checkpoint incrementally to reduce memory + // footprint during checkpoint. 
for name, opt := range s.optMap { var pc parameterCheckpoint pc.Param.Name = name pc.Param.ElementType = opt.elementType pc.Param.Content = opt.GetWeights() + pc.Config = opt.config pc.State = opt.GetStates() cp[index] = pc index++ } + s.mu.Unlock() + var buf bytes.Buffer encoder := gob.NewEncoder(&buf) err = encoder.Encode(cp) @@ -264,32 +297,9 @@ func (s *Service) doCheckpoint() (err error) { return } - cpMeta := checkpointMeta{} - cpMeta.UUID = s.checkpointPath + strconv.Itoa(s.idx) - cpMeta.Timestamp = time.Now().UnixNano() - h := md5.New() - cpMeta.MD5 = hex.EncodeToString(h.Sum(buf.Bytes())) - - cpMetajson, err := json.Marshal(cpMeta) - if err != nil { - return - } - - err = s.client.PutKey(filepath.Join(PsCheckpoint, strconv.Itoa(s.idx)), cpMetajson, 3*time.Second) - if err != nil { - return - } - if _, err = os.Stat(cpMeta.UUID); os.IsNotExist(err) { - log.Info("checkpoint does not exists.") - } else { - err = os.Remove(cpMeta.UUID) - if err != nil { - log.Infof("Removing checkpoint %s failed", cpMeta.UUID) - } else { - log.Infof("checkpoint %s already exsits, removing ", cpMeta.UUID) - } - } - f, err := os.Create(cpMeta.UUID) + id := uuid.NewV4().String() + p := path.Join(s.checkpointPath, id) + f, err := os.Create(p) if err != nil { return } @@ -317,5 +327,43 @@ func (s *Service) doCheckpoint() (err error) { return } + oldMeta, err := loadMeta(s.client, s.idx) + if err == ErrCheckpointNotFound { + log.Infoln("Do not have existing checkpoint.") + err = nil + } + + if err != nil { + return + } + + h := md5.New() + md5 := hex.EncodeToString(h.Sum(buf.Bytes())) + cpMeta := checkpointMeta{ + UUID: id, + Timestamp: time.Now().UnixNano(), + MD5: md5, + Path: p, + } + + json, err := json.Marshal(cpMeta) + if err != nil { + return + } + + err = s.client.PutKey(PsCheckpoint+strconv.Itoa(s.idx), json, 3*time.Second, false) + if err != nil { + return + } + + if oldMeta.Path != "" { + rmErr := os.Remove(oldMeta.Path) + if rmErr != nil { + // log error, but still treat checkpoint as + // successful. 
+ log.Errorln(rmErr) + } + } + return } diff --git a/go/pserver/service_test.go b/go/pserver/service_test.go index 988f3b5acb82a95aeb54af2b8b0e4d39a458291a..be648cd1e83e4f7790edac5842db432fb4870072 100644 --- a/go/pserver/service_test.go +++ b/go/pserver/service_test.go @@ -30,7 +30,7 @@ const ( func TestServiceFull(t *testing.T) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) if err != nil { t.Error(err) } @@ -102,7 +102,7 @@ func TestServiceFull(t *testing.T) { func TestMultipleInit(t *testing.T) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) if err != nil { t.Fatal(err) } @@ -119,7 +119,7 @@ func TestMultipleInit(t *testing.T) { func TestUninitialized(t *testing.T) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) err = s.SendGrad(pserver.Gradient{}, nil) if err.Error() != pserver.Uninitialized { t.Fatal(err) @@ -128,7 +128,7 @@ func TestUninitialized(t *testing.T) { func TestBlockUntilInitialized(t *testing.T) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) if err != nil { t.Error(err) } diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index f8a88cf317aee6c5dd25e4cc25d588c6c50fcbce..cf61a243e9df2fd4a580e41f07cb0a22dcc72083 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -22,7 +22,5 @@ if(WITH_C_API) endif() if(WITH_SWIG_PY) - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in - ${CMAKE_CURRENT_SOURCE_DIR}/setup.py) add_subdirectory(api) endif() diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 84da89a1422b6095b995744cebb6a3af98a071c6..7a1e8b8b26ac6330c3799b7dfeb4447e171fe0f1 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -82,9 +82,7 @@ SWIG_LINK_LIBRARIES(swig_paddle add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PROJ_ROOT}/paddle/py_paddle - COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel - COMMAND ${CMAKE_COMMAND} -E touch dist/.timestamp - COMMAND rm -rf py_paddle.egg-info build + COMMAND ${CMAKE_COMMAND} -E touch .timestamp WORKING_DIRECTORY ${PROJ_ROOT}/paddle DEPENDS _swig_paddle ) @@ -92,10 +90,6 @@ add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so # TODO(yuyang18) : make wheel name calculated by cmake add_custom_target(python_api_wheel ALL DEPENDS ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so) -install(DIRECTORY ${CMAKE_SOURCE_DIR}/paddle/dist/ - DESTINATION opt/paddle/share/wheels -) - if(WITH_TESTING) IF(NOT PY_PIP_FOUND) SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip) @@ -108,7 +102,7 @@ if(WITH_TESTING) BUILD_COMMAND "" INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install BUILD_IN_SOURCE 1 - DEPENDS python setuptools python_api_wheel + #DEPENDS python setuptools python_api_wheel ) ENDIF() add_subdirectory(test) diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt index 73ffa690d9d91b673079fc0ecf91f17cbabfdb1e..0865b02c4f275f3d5069109917b05dff1393fc1e 100755 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/cuda/CMakeLists.txt @@ -39,6 +39,7 @@ set(CUDA_CU_SOURCES src/hl_cuda_lstm.cu src/hl_top_k.cu src/hl_batch_transpose.cu 
+ src/hl_batch_norm.cu src/hl_cuda_sequence.cu src/hl_table_apply.cu) diff --git a/paddle/cuda/include/hl_batch_norm.h b/paddle/cuda/include/hl_batch_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..afc5e0b2deacc4aadf98b3f7ce115e534bbc5124 --- /dev/null +++ b/paddle/cuda/include/hl_batch_norm.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifndef HL_BATCH_NORM_H_ +#define HL_BATCH_NORM_H_ + +#include "hl_base.h" + +/** + * @brief batch norm inferece. + * + * @param[in] input input data. + * @param[out] output output data. + * @param[in] scale batch normalization scale parameter (in original + * paper scale is referred to as gamma). + * @param[in] bias batch normalization bias parameter (in original + * paper scale is referred to as beta). + * @param[in] estimatedMean + * @param[in] estimatedVar The moving mean and variance + * accumulated during the training phase are passed + * as inputs here. + * @param[in] epsilon Epsilon value used in the batch + * normalization formula. + */ +extern void hl_batch_norm_cuda_inference(const real* input, + real* output, + const real* scale, + const real* bias, + const real* estimatedMean, + const real* estimatedVar, + const double epsilon, + size_t batchSize, + size_t channel, + size_t height, + size_t width); + +#endif // HL_BATCH_NORM_H_ diff --git a/paddle/cuda/src/hl_batch_norm.cu b/paddle/cuda/src/hl_batch_norm.cu new file mode 100644 index 0000000000000000000000000000000000000000..5828ecb8e049c2f0573ab8547164794bef6db1ca --- /dev/null +++ b/paddle/cuda/src/hl_batch_norm.cu @@ -0,0 +1,66 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "hl_batch_norm.h" + +__global__ void batchNormInference(real* output, + const real* input, + const real* scale, + const real* bias, + const real* estimatedMean, + const real* estimatedVar, + const double epsilon, + size_t batchSize, + size_t channel, + size_t height, + size_t width) { + const int tid = threadIdx.x; + const int num = channel * height * width; + const int batch = blockIdx.x; + for (int i = tid; i < num; i += blockDim.x) { + const int c = i / (height * width); + const int id = batch * num + i; + real val = input[id] - estimatedMean[c]; + val /= sqrt(estimatedVar[c] + epsilon); + val *= scale[c]; + val += bias[c]; + output[id] = val; + } +} + +void hl_batch_norm_cuda_inference(const real* input, + real* output, + const real* scale, + const real* bias, + const real* estimatedMean, + const real* estimatedVar, + const double epsilon, + size_t batchSize, + size_t channel, + size_t height, + size_t width) { + batchNormInference<<>>(output, + input, + scale, + bias, + estimatedMean, + estimatedVar, + epsilon, + batchSize, + channel, + height, + width); + + CHECK_SYNC("hl_batch_norm_cuda_inference failed!"); +} diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc index 7ad8a39768a064140a08c912a5a467bc24a12adf..78642a17443b0b4d81defaa46579332ef20c71a1 100644 --- a/paddle/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/cuda/src/hl_cuda_cudnn.cc @@ -1023,14 +1023,6 @@ void hl_batch_norm_forward_inference(hl_tensor_descriptor inputDesc, real beta = 1.0f; cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL; - int batch_size = ((cudnn_tensor_descriptor)inputDesc)->batch_size; - if (batch_size > 1024 && g_cudnn_lib_version < 6000) { - LOG(INFO) << " To process current batch data with size " << batch_size - << " (>1024), cudnnBatchNorm requires cuDNN version >= 6000." 
- << " If there is an error complaining CUDNN_STATUS_NOT_SUPPORTED," - << " just recompile PaddlePaddle with cuDNN >= 6000, replacing" - << " current version " << g_cudnn_lib_version; - } CHECK_CUDNN( dynload::cudnnBatchNormalizationForwardInference(t_resource.cudnn_handle, mode, diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 1db042c6fc8b6c4ea7c3854ea4b1cd016deeb0b6..6601918c90b967a659bceb8535fa0b53844b6f43 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -7,6 +7,9 @@ cc_library(tensor SRCS tensor.cc DEPS ddim place paddle_memory device_context) cc_test(tensor_test SRCS tensor_test.cc DEPS tensor) cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) +cc_library(lod_tensor SRCS lod_tensor.cc details/lod_tensor.cc DEPS ddim place tensor) +cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor) + cc_test(variable_test SRCS variable_test.cc) cc_library(scope SRCS scope.cc) @@ -35,12 +38,17 @@ add_dependencies(framework_py_proto framework_py_proto_init) cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward) + +if(WITH_PYTHON) cc_library(paddle_pybind SHARED SRCS pybind.cc DEPS pybind python backward - fc_op - sgd_op - add_op - mean_op - cross_entropy_op - recurrent_op) + fc_op + sgd_op + add_op + mean_op + cross_entropy_op + recurrent_op + uniform_random_op + fill_zeros_like_op) +endif(WITH_PYTHON) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 13706f8b562a1d68fe0d603f51c2fb47b4e18164..47983110fa618e89d455a311af2112fc0ff2b9ae 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include "paddle/framework/backward.h" + #include #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 6c6e12ca254553a8fc02cadbe3a99989ee848943..6d5835bd2236118b6aff95743c4319faceb05d89 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -17,16 +17,21 @@ #include #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" -#include "paddle/operators/type_alias.h" namespace paddle { namespace framework { +using OperatorBase = framework::OperatorBase; +using OpProtoAndCheckerMaker = framework::OpProtoAndCheckerMaker; +using OpProto = framework::OpProto; +using OpAttrChecker = framework::OpAttrChecker; +using Scope = framework::Scope; +using DeviceContext = platform::DeviceContext; + class EmptyOp : public OperatorBase { public: void InferShape(const Scope &scope) const override {} - void Run(const Scope &scope, - const platform::DeviceContext &dev_ctx) const override {} + void Run(const Scope &scope, const DeviceContext &dev_ctx) const override {} }; class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { @@ -71,7 +76,7 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker { } }; -class FcOp : public ops::NetOp { +class FcOp : public operators::NetOp { public: void Init() override { AddOp(OpRegistry::CreateOp("mul", {Input("X"), Input("W")}, @@ -143,6 +148,7 @@ class AddOpMaker : public OpProtoAndCheckerMaker { } // namespace paddle namespace f = paddle::framework; +namespace ops = paddle::operators; using EnforceNotMet = paddle::platform::EnforceNotMet; REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker); REGISTER_GRADIENT_OP(rowwise_add, rowwise_add_grad, f::EmptyOp); diff --git 
a/paddle/framework/details/lod_tensor.cc b/paddle/framework/details/lod_tensor.cc new file mode 100644 index 0000000000000000000000000000000000000000..9ad3979e5b511517f75d2d43004f97ee1576953b --- /dev/null +++ b/paddle/framework/details/lod_tensor.cc @@ -0,0 +1,62 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/framework/lod_tensor.h" + +#include + +namespace paddle { +namespace framework { +namespace details { + +using LOD = LODTensor::LOD; + +std::shared_ptr SliceLOD(const LOD &lod, size_t level_begin, + size_t level_end) { + auto new_lod = std::make_shared(); + new_lod->reserve(level_end - level_begin); + for (size_t i = level_begin; i < level_end; i++) { + new_lod->emplace_back(lod[i]); + } + return new_lod; +} + +std::shared_ptr SliceLOD(const LOD &lod, size_t level, size_t elem_begin, + size_t elem_end, bool tensor_shared) { + // slice the lod. + auto new_lod = std::make_shared(); + new_lod->reserve(lod.size() - level); + auto start = lod.at(level)[elem_begin]; + auto end = lod.at(level)[elem_end]; + + for (auto it = lod.begin() + level; it != lod.end(); it++) { + auto it_begin = std::find(it->begin(), it->end(), start); + auto it_end = std::find(it_begin, it->end(), end); + PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info"); + PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info"); + new_lod->emplace_back(it_begin, it_end + 1); + if (!tensor_shared) { + // reset offset if tensor is copyed and sliced. + std::transform(new_lod->back().begin(), new_lod->back().end(), + new_lod->back().begin(), + [start](int v) { return v - start; }); + PADDLE_ENFORCE(new_lod->back().front() == 0, "error in slice LOD"); + } + } + return new_lod; +} + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/details/lod_tensor.h b/paddle/framework/details/lod_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..9a6a6cd2ea41f02db991bdc0a2b917433dafed99 --- /dev/null +++ b/paddle/framework/details/lod_tensor.h @@ -0,0 +1,46 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include + +namespace paddle { +namespace framework { +namespace details { + +/* + * Slice levels from LOD. + * + * @lod: LOD to slice. + * @level_begin: level to begin slice. + * @level_end: level to end slice. 
+ */ +std::shared_ptr SliceLOD(const LODTensor::LOD &lod, + size_t level_begin, size_t level_end); + +/* + * Slice elements from a level of LOD. + * + * @lod: LOD to slice. + * @level: which level to slice. + * @elem_begin: element's index to begin slice. + * @elem_end: element's index to end slice. + */ +std::shared_ptr SliceLOD(const LODTensor::LOD &lod, + size_t level, size_t elem_begin, + size_t elem_end, bool tensor_shared); +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc new file mode 100644 index 0000000000000000000000000000000000000000..70045dbf7afd0935e4df852b2f0e3ecd163a9316 --- /dev/null +++ b/paddle/framework/lod_tensor.cc @@ -0,0 +1,51 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/framework/lod_tensor.h" + +#include + +namespace paddle { +namespace framework { + +LODTensor LODTensor::SliceShared(size_t level_begin, size_t level_end) const { + PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); + auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end); + // slice levels just need to update LOD info, each level will contains the + // whole tensor_, so no need to modify tensor_. + return LODTensor(tensor_, new_lod); +} + +LODTensor LODTensor::SliceShared(size_t level, size_t elem_begin, + size_t elem_end) const { + PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); + PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, + NumLevels()); + PADDLE_ENFORCE(elem_begin < NumElements(level), + "element begin [%d] out of range [%d]", elem_begin, + NumElements(level)); + PADDLE_ENFORCE(elem_end < NumElements(level) + 1, + "element end [%d] out of range [%d]", elem_end, + NumElements(level)); + + auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin, elem_end, + true /*tensor_shared*/); + + // slice elements just need to update LOD info, because offsets are not + // changed, so the original tensor_ can be reused. + return LODTensor(tensor_, new_lod); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..4933479b109694312e99595dc8ad6db70259efa6 --- /dev/null +++ b/paddle/framework/lod_tensor.h @@ -0,0 +1,145 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include +#if (!PADDLE_ONLY_CPU) +#include +#include +#endif + +#include "paddle/framework/ddim.h" +#include "paddle/framework/tensor.h" +#include "paddle/platform/enforce.h" + +namespace paddle { +namespace framework { + +/* + * LODTensor (Level of details Tensor) + * see https://en.wikipedia.org/wiki/Level_of_details for reference. + */ +class LODTensor { + public: +// Level save offsets of each unit. +#ifdef PADDLE_ONLY_CPU + using Level = std::vector; +#else + using Level = thrust::device_vector; +#endif + // LOD stores offsets of each level of units, the largest units level first, + // then the smaller units level. Each Level stores the offsets of units in + // Tesor. + typedef std::vector LOD; + + LODTensor() {} + LODTensor(const std::shared_ptr &tensor, + const std::shared_ptr &lod) { + Reset(tensor, lod); + } + + void Reset(const std::shared_ptr &tensor, + const std::shared_ptr &lod) { + tensor_ = tensor; + lod_start_pos_ = lod; + } + + /* + * Get a element from LOD. + */ + size_t lod_element(size_t level, size_t elem) const { + PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, + NumLevels()); + PADDLE_ENFORCE(elem < NumElements(level), + "element begin [%d] out of range [%d]", elem, + NumElements(level)); + return (*lod_start_pos_)[level][elem]; + } + + /* + * Number of LODTensor's levels, each level has units of data, for example, + * in the sentence's view, article, paragraph, sentence are 3 levels. + */ + size_t NumLevels() const { + return lod_start_pos_ ? lod_start_pos_->size() : 0UL; + } + /* + * Number of elements in a level. + */ + size_t NumElements(size_t level = 0) const { + PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, + NumLevels()); + // the last offset is the end of last element + return lod_start_pos_->at(level).size() - 1; + } + + /* + * Slice of levels[level_begin:level_end], with tensor copied. + */ + template + LODTensor SliceCopied(size_t level_begin, size_t level_end, + const platform::Place &dst_place) const; + + /* + * Slice of levels[level_begin:level_end], with tensor shared. + */ + LODTensor SliceShared(size_t level_begin, size_t level_end) const; + + /* + * Slice of elements of a level, [elem_begin: elem_end], with tensor copied. + * @note: low performance in slice lod_start_pos_. + */ + template + LODTensor SliceCopied(size_t level, size_t elem_begin, size_t elem_end, + const platform::Place &dst_place) const; + + /* + * Slice of elements of a level, [elem_begin: elem_end], with tensor shared. + * @note: low performance in slice lod_start_pos_. + */ + LODTensor SliceShared(size_t level, size_t elem_begin, size_t elem_end) const; + + /* + * Copy other's lod_start_pos_, to share LOD info. + * @note: the LOD info should not be changed. + */ + void ShareLOD(const LODTensor &other) { + lod_start_pos_ = other.lod_start_pos_; + } + + /* + * Copy other's lod_start_pos_'s content, free to mutate. + */ + void CopyLOD(const LODTensor &other) { + lod_start_pos_ = std::make_shared(*other.lod_start_pos_); + } + /* + * Determine whether LODTensor has a valid LOD info. 
+ */ + bool HasLOD() const { return bool(lod_start_pos_); } + LOD *lod() const { return lod_start_pos_.get(); } + + std::shared_ptr &tensor() { return tensor_; } + Tensor *raw_tensor() { return tensor_.get(); } + + private: + std::shared_ptr lod_start_pos_; + std::shared_ptr tensor_; +}; + +} // namespace framework +} // namespace paddle + +#include "paddle/framework/lod_tensor_impl.h" diff --git a/paddle/framework/lod_tensor_impl.h b/paddle/framework/lod_tensor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..0eb6469aea3ae25f035751da985b5bebb489d961 --- /dev/null +++ b/paddle/framework/lod_tensor_impl.h @@ -0,0 +1,60 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/framework/details/lod_tensor.h" + +namespace paddle { +namespace framework { + +template +LODTensor LODTensor::SliceCopied(size_t level_begin, size_t level_end, + const platform::Place &dst_place) const { + PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); + auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end); + auto new_tensor = std::make_shared(); + new_tensor->CopyFrom(*tensor_, dst_place); + + return LODTensor(new_tensor, new_lod); +} + +template +LODTensor LODTensor::SliceCopied(size_t level, size_t elem_begin, + size_t elem_end, + const platform::Place &dst_place) const { + PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); + PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, + NumLevels()); + PADDLE_ENFORCE(elem_begin < NumElements(level), + "element begin [%d] out of range [%d]", elem_begin, + NumElements(level)); + PADDLE_ENFORCE(elem_end < NumElements(level) + 1, + "element end [%d] out of range [%d]", elem_end, + NumElements(level)); + + auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin, elem_end, + false /*tensor_shared*/); + + auto start_idx = new_lod->front().front(); + auto end_idx = new_lod->front().back() - 1 /*the next element's start*/; + auto sliced_tensor = tensor_->Slice(start_idx, end_idx); + auto new_tensor = std::make_shared(); + new_tensor->CopyFrom(sliced_tensor, dst_place); + + return LODTensor(new_tensor, new_lod); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..511716375e81e8fd89b071c940ee97327c268b8b --- /dev/null +++ b/paddle/framework/lod_tensor_test.cc @@ -0,0 +1,165 @@ +/* + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "paddle/framework/lod_tensor.h" + +#include +#include +#include + +namespace paddle { +namespace framework { + +class LODTensorTester : public ::testing::Test { + public: + virtual void SetUp() override { + lod_tensor.reset(new LODTensor); + // tensor's batch_size: 30 + // 3 levels + // 0 10 20 + // 0 5 10 15 20 + // 0 2 5 7 10 12 15 20 + auto lod = std::make_shared(); + lod->push_back(std::vector{0, 10, 20}); + lod->push_back(std::vector{0, 5, 10, 15, 20}); + lod->push_back(std::vector{0, 2, 5, 7, 10, 12, 15, 17, 20}); + + auto tensor = std::make_shared(); + tensor->Resize({20 /*batch size*/, 128 /*dim*/}); + // malloc memory + tensor->mutable_data(place); + + lod_tensor->Reset(tensor, lod); + } + + protected: + std::unique_ptr lod_tensor; + platform::CPUPlace place; +}; + +TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor->NumLevels(), 3UL); } + +TEST_F(LODTensorTester, NumElements) { + ASSERT_EQ(lod_tensor->NumElements(0), 2UL); + ASSERT_EQ(lod_tensor->NumElements(1), 4UL); + ASSERT_EQ(lod_tensor->NumElements(2), 8UL); +} + +TEST_F(LODTensorTester, SliceShared_Level) { + // slice 1 level + for (size_t level = 0; level < 3UL; ++level) { + auto new_lod_tensor = lod_tensor->SliceShared(level, level + 1); + ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); + ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level)); + ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); + } + // slice 2 level + for (size_t level = 0; level < 2UL; ++level) { + auto new_lod_tensor = lod_tensor->SliceShared(level, level + 2); + ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level)); + ASSERT_EQ(new_lod_tensor.NumElements(1), + lod_tensor->NumElements(level + 1)); + ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); + } +} + +TEST_F(LODTensorTester, SliceCopied_Level) { + // slice 1 level + for (size_t level = 0; level < 3UL; ++level) { + auto new_lod_tensor = + lod_tensor->SliceCopied(level, level + 1, place); + ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); + ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level)); + // ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); + // TODO(superjom) add tensor comparation here. + } + // slice 2 level + for (size_t level = 0; level < 2UL; ++level) { + auto new_lod_tensor = + lod_tensor->SliceCopied(level, level + 2, place); + ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level)); + ASSERT_EQ(new_lod_tensor.NumElements(1), + lod_tensor->NumElements(level + 1)); + // ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); + // TODO(superjom) add tensor comparation here. 
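// [editorial aside, not part of this patch] The LOD used by these tests is a list of
// sorted offset vectors: level L holds lod[L].size() - 1 elements, and element i of
// that level spans rows [lod[L][i], lod[L][i + 1]) of the underlying tensor. A small
// self-contained sketch of that bookkeeping, using the same offsets as SetUp above
// (plain std containers, hypothetical standalone program):
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<std::vector<size_t>> lod = {
      {0, 10, 20},
      {0, 5, 10, 15, 20},
      {0, 2, 5, 7, 10, 12, 15, 17, 20}};
  for (size_t level = 0; level < lod.size(); ++level) {
    std::cout << "level " << level << ": " << lod[level].size() - 1
              << " elements\n";  // prints 2, 4 and 8, matching the NumElements asserts
  }
  // lod_element(1, 1) == 5: element 1 of level 1 covers rows [5, 10) of the tensor.
  std::cout << "level 1, element 1 covers rows [" << lod[1][1] << ", " << lod[1][2]
            << ")\n";
  return 0;
}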
+ } +} + +TEST_F(LODTensorTester, SliceShared_Element) { + size_t level = 0; + auto new_lod_tensor = lod_tensor->SliceShared(level, 0, 2); + ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL); + ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); + ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL); + ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); + + level = 1; + new_lod_tensor = lod_tensor->SliceShared(level, 0, 2); + ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); + ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); +} + +TEST_F(LODTensorTester, SliceCopied_Element) { + size_t level = 0; + auto new_lod_tensor = lod_tensor->SliceCopied(level, 0, 2, place); + ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL); + ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); + ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL); + ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); + + level = 1; + new_lod_tensor = lod_tensor->SliceCopied(level, 0, 2, place); + ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); + ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); + + level = 1; + // LOD is + // 0 5 10 + // 0 2 5 7 10 + new_lod_tensor = lod_tensor->SliceCopied(level, 1, 3, place); + ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); + ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); + + ASSERT_EQ(new_lod_tensor.lod_element(0, 0), 0UL); + ASSERT_EQ(new_lod_tensor.lod_element(0, 1), 5UL); + ASSERT_EQ(new_lod_tensor.lod_element(1, 0), 0UL); + ASSERT_EQ(new_lod_tensor.lod_element(1, 1), 2UL); + ASSERT_EQ(new_lod_tensor.lod_element(1, 2), 5UL); + ASSERT_EQ(new_lod_tensor.lod_element(1, 3), 7UL); + + // TODO(superjom) compare the content of these tensors +} + +TEST_F(LODTensorTester, ShareLOD) { + LODTensor new_lod_tensor; + new_lod_tensor.ShareLOD(*lod_tensor); + ASSERT_EQ(new_lod_tensor.lod(), lod_tensor->lod()); +} + +TEST_F(LODTensorTester, CopyLOD) { + LODTensor new_lod_tensor; + new_lod_tensor.CopyLOD(*lod_tensor); + ASSERT_NE(new_lod_tensor.lod(), lod_tensor->lod()); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index beb6793289812cfaa6991d28379126ff29fa2547..d9a013b883abdec4422806f90e36da7410a4fa0c 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -34,8 +34,8 @@ ExecutionContext::GetEigenDevice() const { #endif const std::string& OperatorBase::Input(const std::string& name) const { - PADDLE_ENFORCE(in_out_idxs_ != nullptr, - "Input Output Indices could not be nullptr"); + PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, + "Input Output Indices could not be nullptr"); auto it = in_out_idxs_->find(name); PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_", name); @@ -49,7 +49,7 @@ const std::string& OperatorBase::Input(const std::string& name) const { } std::vector OperatorBase::Inputs(const std::string& name) const { - PADDLE_ENFORCE(in_out_idxs_ != nullptr, "IO Idx could not be nullptr"); + PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "IO Idx could not be nullptr"); auto input_format = GetAttr>("input_format"); auto offset = in_out_idxs_->at(name); PADDLE_ENFORCE(input_format.at(static_cast(offset) + 1) <= @@ -62,7 +62,7 @@ std::vector 
OperatorBase::Inputs(const std::string& name) const { } const std::string& OperatorBase::Output(const std::string& name) const { - PADDLE_ENFORCE(in_out_idxs_ != nullptr, "InOut Indice could not be nullptr"); + PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "InOut Indice could not be nullptr"); auto it = in_out_idxs_->find(name); PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_", name); @@ -76,7 +76,7 @@ const std::string& OperatorBase::Output(const std::string& name) const { } std::vector OperatorBase::Outputs(const std::string& name) const { - PADDLE_ENFORCE(in_out_idxs_ != nullptr, "InOut Indice could not be nullptr"); + PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "InOut Indice could not be nullptr"); auto output_format = GetAttr>("output_format"); auto offset = in_out_idxs_->at(name); PADDLE_ENFORCE(output_format.at(static_cast(offset) + 1) <= diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 7242b6418daed4413b0e637f33250a7eb3ae5282..bf2ce18edc392bf4d2e67f6b28d023bf22bcb62b 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -88,6 +88,8 @@ class OperatorBase { virtual bool IsNetOp() const { return false; } + virtual bool SupportGPU() const { return false; } + /// rename inputs outputs name void Rename(const std::string& old_name, const std::string& new_name); @@ -118,10 +120,10 @@ class OperatorBase { std::shared_ptr> in_out_idxs_; }; -class OperatorContext { +class InferShapeContext { public: - OperatorContext(const OperatorBase* op, const Scope& scope) - : op_(*op), scope_(scope) {} + InferShapeContext(const OperatorBase& op, const Scope& scope) + : op_(op), scope_(scope) {} size_t InputSize() const { return op_.inputs_.size(); } @@ -167,15 +169,15 @@ class OperatorContext { template const T* Input(const size_t index) const { auto var = InputVar(index); - PADDLE_ENFORCE(var != nullptr, "Input(%d) should not be nullptr", index); + PADDLE_ENFORCE_NOT_NULL(var, "Input(%d) should not be nullptr", index); return &var->Get(); } template T* Output(const size_t index) const { auto var = OutputVar(index); - PADDLE_ENFORCE( - var != nullptr, + PADDLE_ENFORCE_NOT_NULL( + var, "Output(%d) not be nullptr, which means variable [%s] does not " "exist in scope", index, op_.outputs_[index]); @@ -185,14 +187,14 @@ class OperatorContext { template const T* Input(const std::string& name) const { auto var = InputVar(name); - PADDLE_ENFORCE(var != nullptr, "Input(%s) should not be nullptr", name); + PADDLE_ENFORCE_NOT_NULL(var, "Input(%s) should not be nullptr", name); return &var->Get(); } template T* Output(const std::string& name) const { auto var = OutputVar(name); - PADDLE_ENFORCE(var != nullptr, "Output(%s) should not be nullptr", name); + PADDLE_ENFORCE_NOT_NULL(var, "Output(%s) should not be nullptr", name); return var->GetMutable(); } @@ -204,9 +206,9 @@ class OperatorContext { std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { auto var = scope_.FindVar(sub_name); - PADDLE_ENFORCE(var != nullptr, - "MultiInput(%s:%s) should not be nullptr", - name, sub_name); + PADDLE_ENFORCE_NOT_NULL( + var, "MultiInput(%s:%s) should not be nullptr", name, + sub_name); return &var->Get(); }); return res; @@ -220,9 +222,9 @@ class OperatorContext { std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { auto var = scope_.FindVar(sub_name); - PADDLE_ENFORCE(var != nullptr, - "MultiOutput(%s:%s) should not be nullptr", - name, sub_name); + 
PADDLE_ENFORCE_NOT_NULL( + var, "MultiOutput(%s:%s) should not be nullptr", name, + sub_name); return var->GetMutable(); }); return res; @@ -232,12 +234,6 @@ class OperatorContext { const Scope& scope_; }; -class InferShapeContext : public OperatorContext { - public: - InferShapeContext(const OperatorBase* op, const Scope& scope) - : OperatorContext(op, scope) {} -}; - template struct EigenDeviceConverter; @@ -253,11 +249,11 @@ struct EigenDeviceConverter { }; #endif -class ExecutionContext : public OperatorContext { +class ExecutionContext : public InferShapeContext { public: - ExecutionContext(const OperatorBase* op, const Scope& scope, + ExecutionContext(const OperatorBase& op, const Scope& scope, const platform::DeviceContext* device_context) - : OperatorContext(op, scope), device_context_(device_context) {} + : InferShapeContext(op, scope), device_context_(device_context) {} template , OpKernelHash>; - void InferShape(const Scope& scope) const { - InferShape(InferShapeContext(this, scope)); + void InferShape(const Scope& scope) const override { + InferShape(InferShapeContext(*this, scope)); } void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const final { auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx)); - opKernel->Compute(ExecutionContext(this, scope, &dev_ctx)); + opKernel->Compute(ExecutionContext(*this, scope, &dev_ctx)); } static std::unordered_map& @@ -328,6 +324,12 @@ class OperatorWithKernel : public OperatorBase { return g_all_op_kernels; } + bool SupportGPU() const override { + OperatorWithKernel::OpKernelKey key; + key.place_ = platform::GPUPlace(); + return OperatorWithKernel::AllOpKernels().at(type_).count(key) != 0; + } + protected: virtual void InferShape(const InferShapeContext& ctx) const = 0; }; diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc index cbb86c4195a6c7e976fc5e0dd69d77be46dfb17c..915ffb1c007c24eca457a0c646378bc191a15810 100644 --- a/paddle/framework/pybind.cc +++ b/paddle/framework/pybind.cc @@ -18,11 +18,8 @@ limitations under the License. */ #include "paddle/framework/backward.h" #include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/scope.h" #include "paddle/framework/tensor_py.h" #include "paddle/operators/net_op.h" -#include "paddle/operators/type_alias.h" #include "paddle/platform/enforce.h" #include "paddle/platform/place.h" #include "pybind11/numpy.h" @@ -32,7 +29,7 @@ limitations under the License. 
*/ namespace py = pybind11; USE_OP(add_two); -USE_OP(onehot_cross_entropy); +USE_OP_CPU(onehot_cross_entropy); USE_OP_WITHOUT_KERNEL(fc); USE_OP(sgd); USE_OP(mul); @@ -40,9 +37,14 @@ USE_OP(mean); USE_OP(sigmoid); USE_OP(softmax); USE_OP(rowwise_add); +USE_OP(fill_zeros_like); USE_OP_WITHOUT_KERNEL(recurrent_op); +USE_OP(uniform_random); namespace paddle { namespace framework { + +using Tensor = framework::Tensor; + template void ExposeOperator(ClassType &m) { m.def("infer_shape", &ClassType::type::InferShape) @@ -55,6 +57,26 @@ void ExposeOperator(ClassType &m) { [](const typename ClassType::type &op) -> std::vector { return op.outputs_; }) + .def("inputs", + [](const typename ClassType::type &op) -> std::vector { + return op.inputs_; + }) + .def("support_gpu", &ClassType::type::SupportGPU) + .def("temp_outputs", + [](const typename ClassType::type &op) -> std::vector { + auto iter = op.attrs_.find("temporary_index"); + std::vector ret; + if (iter == op.attrs_.end()) { + return ret; + } else { + auto tmp_idx = boost::get>(iter->second); + ret.reserve(tmp_idx.size()); + for (auto &index : tmp_idx) { + ret.push_back(op.outputs_.at(index)); + } + return ret; + } + }) .def("__str__", &ClassType::type::DebugString); } @@ -128,8 +150,8 @@ All parameter, weight, gradient are variables in Paddle. [](Variable &self) -> Tensor * { return self.GetMutable(); }, py::return_value_policy::reference) .def("get_net", - [](Variable &self) -> ops::NetOp * { - return self.GetMutable(); + [](Variable &self) -> operators::NetOp * { + return self.GetMutable(); }, py::return_value_policy::reference); @@ -208,23 +230,24 @@ All parameter, weight, gradient are variables in Paddle. ExposeOperator(operator_base); - py::class_> net(m, "Net"); + py::class_> net(m, "Net"); net.def_static("create", - []() -> std::shared_ptr { - auto retv = std::make_shared(); + []() -> std::shared_ptr { + auto retv = std::make_shared(); retv->type_ = "plain_net"; return retv; }) - .def("add_op", &ops::NetOp::AddOp) - .def( - "add_op", - [](ops::NetOp &self, const std::shared_ptr &net) -> void { - self.AddOp(std::static_pointer_cast(net)); - }) - .def("complete_add_op", &ops::NetOp::CompleteAddOp) - .def("complete_add_op", - [](std::shared_ptr &self) { self->CompleteAddOp(); }); + .def("add_op", &operators::NetOp::AddOp) + .def("add_op", + [](operators::NetOp &self, + const std::shared_ptr &net) -> void { + self.AddOp(std::static_pointer_cast(net)); + }) + .def("complete_add_op", &operators::NetOp::CompleteAddOp) + .def("complete_add_op", [](std::shared_ptr &self) { + self->CompleteAddOp(); + }); ExposeOperator(net); diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 2aac8a128a21f0c53ea3785e698d46a3a6e3bded..beb30c6b2dc5a8c5f4ba903b986e82dd408a2e11 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -18,6 +18,8 @@ limitations under the License. */ #include #include #include +#include + #include "paddle/framework/ddim.h" #include "paddle/memory/memory.h" #include "paddle/platform/device_context.h" @@ -129,8 +131,8 @@ class Tensor { memory::PODDeleter(place)), place_(place), size_(size) { - PADDLE_ENFORCE(ptr_ != nullptr, "Insufficient %s memory to allocation.", - is_cpu_place(place_) ? "CPU" : "GPU"); + PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.", + (is_cpu_place(place_) ? 
"CPU" : "GPU")); } virtual size_t size() const { return size_; } diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 92621f8c18ec0d03160a23c462830d14272c7f64..8d9bec6dc9c3f0af822a0d8cd8588dc932970652 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -14,17 +14,18 @@ limitations under the License. */ #pragma once #include "paddle/memory/memcpy.h" +#include "paddle/platform/enforce.h" namespace paddle { namespace framework { template inline void Tensor::check_memory_size() const { - PADDLE_ENFORCE(holder_ != nullptr, - "Tenosr holds no memory. Call Tensor::mutable_data first."); - PADDLE_ENFORCE(holder_->size() >= product(dims_) * sizeof(T) + offset_, - "Tensor's dims_ is out of bound. Call Tensor::mutable_data " - "first to re-allocate memory."); + PADDLE_ENFORCE_NOT_NULL( + holder_, "Tenosr holds no memory. Call Tensor::mutable_data first."); + PADDLE_ENFORCE_GE(holder_->size(), product(dims_) * sizeof(T) + offset_, + "Tensor's dims_ is out of bound. Call Tensor::mutable_data " + "first to re-allocate memory."); } template @@ -51,9 +52,9 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); - PADDLE_ENFORCE(product(dims_) > 0, - "Tensor's numel must be larger than zero to call " - "Tensor::mutable_data. Call Tensor::set_dim first."); + PADDLE_ENFORCE_GT(product(dims_), 0, + "Tensor's numel must be larger than zero to call " + "Tensor::mutable_data. Call Tensor::set_dim first."); /* some versions of boost::variant don't have operator!= */ size_t size = product(dims_) * sizeof(T); if (holder_ == nullptr || !(holder_->place() == place) || @@ -120,11 +121,11 @@ inline void Tensor::CopyFrom(const Tensor& src, template inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { check_memory_size(); - PADDLE_ENFORCE(begin_idx >= 0, "Slice begin index is less than zero."); - PADDLE_ENFORCE(end_idx <= dims_[0], "Slice end index is out of bound."); - PADDLE_ENFORCE(begin_idx < end_idx, - "Begin index must be less than end index."); - PADDLE_ENFORCE(dims_[0] != 1, "Can not slice a tensor with dims_[0] = 1."); + PADDLE_ENFORCE_GE(begin_idx, 0, "Slice begin index is less than zero."); + PADDLE_ENFORCE_LE(end_idx, dims_[0], "Slice end index is out of bound."); + PADDLE_ENFORCE_LT(begin_idx, end_idx, + "Begin index must be less than end index."); + PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); int base = product(dims_) / dims_[0]; Tensor dst; dst.holder_ = holder_; diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index ef1cc10b840896d9ab97f963fc12a4971cd74e1f..7db38d5caeebccf710334e854faf785ef0f64063 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -19,7 +19,7 @@ TEST(Tensor, Dims) { using namespace paddle::framework; using namespace paddle::platform; Tensor tt; - tt.Resize(make_ddim({2, 3, 4})); + tt.Resize({2, 3, 4}); DDim dims = tt.dims(); ASSERT_EQ(arity(dims), 3); for (int i = 0; i < 3; ++i) { @@ -36,7 +36,8 @@ TEST(Tensor, DataAssert) { } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = - "Tenosr holds no memory. Call Tensor::mutable_data first."; + "holder_ should not be null\nTenosr holds no memory. 
Call " + "Tensor::mutable_data first."; const char* what = err.what(); for (size_t i = 0; i < msg.length(); ++i) { ASSERT_EQ(what[i], msg[i]); @@ -111,7 +112,8 @@ TEST(Tensor, ShareDataWith) { } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = - "Tenosr holds no memory. Call Tensor::mutable_data first."; + "holder_ should not be null\nTenosr holds no memory. Call " + "Tensor::mutable_data first."; const char* what = err.what(); for (size_t i = 0; i < msg.length(); ++i) { ASSERT_EQ(what[i], msg[i]); diff --git a/paddle/function/FunctionTest.cpp b/paddle/function/FunctionTest.cpp index 6360a6e023ebd2f97c442c80c8d7f56b5ec4cbf7..7b0b1c6adbd36a6cbe9b89e9518fbe07fb1db368 100644 --- a/paddle/function/FunctionTest.cpp +++ b/paddle/function/FunctionTest.cpp @@ -93,8 +93,8 @@ TEST(Arguments, Matrix) { MatrixPtr matrix = Matrix::create(100, 200); CheckBufferArg check = [=](const BufferArg& arg) { EXPECT_EQ(arg.shape().ndims(), 2U); - EXPECT_EQ(arg.shape()[0], 100); - EXPECT_EQ(arg.shape()[1], 200); + EXPECT_EQ(arg.shape()[0], 100U); + EXPECT_EQ(arg.shape()[1], 200U); EXPECT_EQ(arg.data(), matrix->getData()); EXPECT_EQ(arg.matrix().getHeight(), matrix->getHeight()); @@ -112,8 +112,8 @@ TEST(Arguments, Matrix) { TEST(Arguments, Vector) { VectorPtr vector = Vector::create(100, false); CheckBufferArg check = [=](const BufferArg& arg) { - EXPECT_EQ(arg.shape().ndims(), 1); - EXPECT_EQ(arg.shape()[0], 100); + EXPECT_EQ(arg.shape().ndims(), 1U); + EXPECT_EQ(arg.shape()[0], 100U); EXPECT_EQ(arg.data(), vector->getData()); CpuVector inVector = arg.vector(); @@ -131,9 +131,9 @@ TEST(Arguments, Vector) { TEST(Arguments, CpuSparseMatrix) { CpuSparseMatrix sparse(200, 300, 50); CheckBufferArg check = [=](const BufferArg& arg) { - EXPECT_EQ(arg.shape().ndims(), 2); - EXPECT_EQ(arg.shape()[0], 200); - EXPECT_EQ(arg.shape()[1], 300); + EXPECT_EQ(arg.shape().ndims(), 2U); + EXPECT_EQ(arg.shape()[0], 200U); + EXPECT_EQ(arg.shape()[1], 300U); EXPECT_EQ(arg.data(), sparse.getData()); // CHECK_EQ(arg.sparse().nnz(), 50); // CHECK_EQ(arg.sparse().dataFormat(), SPARSE_CSR_FORMAT); @@ -152,10 +152,10 @@ TEST(Arguments, CpuSparseMatrix) { TEST(Arguments, BufferArg) { BufferArg arg(nullptr, VALUE_TYPE_FLOAT, {1, 2, 3}); CheckBufferArg check = [=](const BufferArg& arg) { - EXPECT_EQ(arg.shape().ndims(), 3); - EXPECT_EQ(arg.shape()[0], 1); - EXPECT_EQ(arg.shape()[1], 2); - EXPECT_EQ(arg.shape()[2], 3); + EXPECT_EQ(arg.shape().ndims(), 3U); + EXPECT_EQ(arg.shape()[0], 1U); + EXPECT_EQ(arg.shape()[1], 2U); + EXPECT_EQ(arg.shape()[2], 3U); }; BufferArgs argments; diff --git a/paddle/function/TensorShapeTest.cpp b/paddle/function/TensorShapeTest.cpp index e5c698237706e7210d3045bbfd0088af58db2954..e55d516d4ac8e86b05911ac1a4e7095f53e2ea3c 100644 --- a/paddle/function/TensorShapeTest.cpp +++ b/paddle/function/TensorShapeTest.cpp @@ -44,7 +44,7 @@ TEST(TensorShape, GetAndSet) { EXPECT_EQ(t.ndims(), 3U); EXPECT_EQ(t.getElements(), 6U); - EXPECT_EQ(t[1], 2); + EXPECT_EQ(t[1], 2U); t.setDim(1, 100); EXPECT_EQ(t.getElements(), 300U); EXPECT_EQ(t[1], 100U); diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/gserver/layers/CudnnBatchNormLayer.cpp index 09dac05a7ad7a80bd6b9e12e8f7f060310d516c8..44ba2c4b7d1562d2ce839b5f4b4de1af35e6925f 100644 --- a/paddle/gserver/layers/CudnnBatchNormLayer.cpp +++ b/paddle/gserver/layers/CudnnBatchNormLayer.cpp @@ -14,6 +14,7 @@ limitations under the License. 
*/ #include "CudnnBatchNormLayer.h" #include "Layer.h" +#include "paddle/cuda/include/hl_batch_norm.h" #include "paddle/utils/Stat.h" namespace paddle { @@ -79,16 +80,33 @@ void CudnnBatchNormLayer::forward(PassType passType) { savedInvVar); } else { // used movingMean and movingVar in testing - hl_batch_norm_forward_inference(ioDesc_, - input, - ioDesc_, - output, - bnParamDesc_, - gamma, - beta, - movingMean, - movingVar, - EPS); + if (batchSize <= 1024) { + hl_batch_norm_forward_inference(ioDesc_, + input, + ioDesc_, + output, + bnParamDesc_, + gamma, + beta, + movingMean, + movingVar, + EPS); + } else { + // There is a limitation in cudnn library. + // When the batch size is larger than 1024 in cuDNN v5.1, + // the cudnnBatchNormalizationForwardInference will fail. + hl_batch_norm_cuda_inference(input, + output, + gamma, + beta, + movingMean, + movingVar, + EPS, + batchSize, + channels_, + imageH_, + imageW_); + } } /* activation */ { diff --git a/paddle/gserver/layers/KmaxSeqScoreLayer.cpp b/paddle/gserver/layers/KmaxSeqScoreLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8ce591d4762466e1ed4b2970cb9cae9203bc0a2b --- /dev/null +++ b/paddle/gserver/layers/KmaxSeqScoreLayer.cpp @@ -0,0 +1,117 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "Layer.h" + +namespace paddle { + +class KmaxSeqScoreLayer : public Layer { +private: + MatrixPtr scores_; + size_t beamSize_; + void kmaxScorePerSeq(const real* score, + real* sortedRes, + const ICpuGpuVectorPtr seqStartPos); + +public: + explicit KmaxSeqScoreLayer(const LayerConfig& config) : Layer(config) {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void forward(PassType passType) override; + void backward(const UpdateCallback& callback = nullptr) override; +}; + +REGISTER_LAYER(kmax_seq_score, KmaxSeqScoreLayer); + +bool KmaxSeqScoreLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + bool ret = Layer::init(layerMap, parameterMap); + CHECK_EQ(1U, inputLayers_.size()); + + beamSize_ = config_.beam_size(); + CHECK_GE(beamSize_, 1U); + + setNeedSequenceInfo(false); + setNeedGradient(false); + return ret; +} + +void KmaxSeqScoreLayer::kmaxScorePerSeq(const real* scores, + real* sortedIds, + const ICpuGpuVectorPtr seqStartPos) { + int* starts = seqStartPos->getMutableData(false); + std::vector indices; + for (size_t i = 0; i < seqStartPos->getSize() - 1; ++i) { + int seqLen = starts[i + 1] - starts[i]; + int k = std::min(static_cast(beamSize_), seqLen); + + indices.resize(seqLen, 0); + std::iota(begin(indices), end(indices), 0.); + std::vector tmpScore(scores + starts[i], scores + starts[i + 1]); + std::partial_sort( + begin(indices), + begin(indices) + k, + end(indices), + [&](size_t a, size_t b) { return tmpScore[a] > tmpScore[b]; }); + memcpy(sortedIds + (i * beamSize_), indices.data(), k * sizeof(real)); + } +} + +void KmaxSeqScoreLayer::forward(PassType passType) { + Layer::forward(passType); + + const Argument& input = getInput(0); + const MatrixPtr inputScore = getInputValue(0); + + CHECK(input.hasSeq() || input.hasSubseq()) + << "input of " << getName() + << " must be a sequence or a nested sequence."; + CHECK_EQ(input.value->getWidth(), 1UL) + << "input of " << getName() + << " is score over a sequence or a nested sequence, so its width " + << " must be 1."; + + if (useGpu_) { + // this Layer runs only in CPU, if the model is runing on GPU, + // then copy the input to this layer from GPU to CPU. + Matrix::resizeOrCreate(scores_, + inputScore->getHeight(), + 1, + false /* trans */, + false /* useGpu */); + scores_->copyFrom(*inputScore); + } else { + scores_ = inputScore; + } + + Matrix::resizeOrCreate( + output_.value, + input.hasSubseq() ? input.getNumSubSequences() : input.getNumSequences(), + beamSize_, + false, + false); + output_.value->one(); + output_.value->mulScalar(-1.); + + kmaxScorePerSeq(scores_->getData(), + output_.value->getData(), + input.hasSubseq() ? input.subSequenceStartPositions + : input.sequenceStartPositions); +} + +void KmaxSeqScoreLayer::backward(const UpdateCallback& callback) {} + +} // namespace paddle diff --git a/paddle/gserver/layers/SubNestedSequenceLayer.cpp b/paddle/gserver/layers/SubNestedSequenceLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..648d3908f391450f276d8a900ebb3bccb8d5532c --- /dev/null +++ b/paddle/gserver/layers/SubNestedSequenceLayer.cpp @@ -0,0 +1,176 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "Layer.h" +#include "paddle/math/Matrix.h" +#include "paddle/math/Vector.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" + +namespace paddle { + +class SubNestedSequenceLayer : public Layer { +public: + explicit SubNestedSequenceLayer(const LayerConfig& config) : Layer(config) {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void forward(PassType passType) override; + void backward(const UpdateCallback& callback = nullptr) override; + +private: + /* + * This functions generates the indices of rows in a batch according to the + * indices of selected sub-sequence in each sequence. + * + * Examples: + * selectedIndices: + * [ + * [0, 1, -1], + * [0, 1, 2], + * [0, -1, -1], + * [0, 2, 3], + * ] + * inputSeqInfo: + * [ + * [0,3,4], + * [4,5,7,10,15], + * [15,20], + * [20,22,23,25,28] + * ] + * + * ths output is saved to private member rowIndice_; + * [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, + * 16,17,18,19,20,21,22,23,24,25,26,27] + */ + + void calSelectedCols(const MatrixPtr selectedIndices, + const std::vector>& inputSeqInfo); + + // if the second input of this layer is on GPU memory, copy it to CPU memory. + MatrixPtr selIdsCpu_; + + // reorganized sequenceStartPositions and subSequenceStartPositions + // into a 2d vector to facilitate the sequence selection process. + std::vector> inputSeqInfoVec_; + + // the final selected row indices in a batch, + // rowIdx_ and selectedRows_ actually share a same memory. + IVectorPtr rowIndice_; + std::vector selectedRows_; +}; + +REGISTER_LAYER(sub_nested_seq, SubNestedSequenceLayer); + +bool SubNestedSequenceLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + CHECK_EQ(2U, inputLayers_.size()); + setNeedSequenceInfo(false); + return true; +} + +void SubNestedSequenceLayer::calSelectedCols( + const MatrixPtr selectedIndices, + const std::vector>& inputSeqInfo) { + selectedRows_.clear(); + + std::vector outSeqStartInfo(1, 0); + std::vector outSubSeqStartInfo(1, 0); + + size_t seqNum = selectedIndices->getHeight(); + size_t beamSize = selectedIndices->getWidth(); + for (size_t i = 0; i < seqNum; ++i) { + for (size_t j = 0; j < beamSize; ++j) { + if (selectedIndices->getElement(i, j) == -1.) break; + size_t selSubSeqIdx = selectedIndices->getElement(i, j); + CHECK_GT(inputSeqInfoVec_[i].size() - 1, selSubSeqIdx); + + size_t subSeqLen = inputSeqInfoVec_[i][selSubSeqIdx + 1] - + inputSeqInfoVec_[i][selSubSeqIdx]; + for (size_t k = 0; k < subSeqLen; ++k) + selectedRows_.push_back(inputSeqInfoVec_[i][selSubSeqIdx] + k); + outSubSeqStartInfo.push_back(outSubSeqStartInfo.back() + subSeqLen); + } + outSeqStartInfo.push_back(outSubSeqStartInfo.back()); + } + + if (useGpu_) { + rowIndice_ = IVector::create(selectedRows_.size(), useGpu_); + rowIndice_->copyFrom(selectedRows_.data(), selectedRows_.size()); + } else { + rowIndice_ = + IVector::create(selectedRows_.data(), selectedRows_.size(), useGpu_); + } + + // create the sequence information for the output. 
+ ICpuGpuVector::resizeOrCreate( + output_.sequenceStartPositions, outSeqStartInfo.size(), false); + output_.sequenceStartPositions->copyFrom( + outSeqStartInfo.data(), outSeqStartInfo.size(), false); + + ICpuGpuVector::resizeOrCreate( + output_.subSequenceStartPositions, outSubSeqStartInfo.size(), false); + output_.subSequenceStartPositions->copyFrom( + outSubSeqStartInfo.data(), outSubSeqStartInfo.size(), false); +} + +void SubNestedSequenceLayer::forward(PassType passType) { + Layer::forward(passType); + + const Argument& inputSeq = getInput(0); + CHECK(inputSeq.hasSubseq()) << "The first input of SubNestSequence layer " + << "must be a nested sequence."; + const MatrixPtr selectedIndices = getInputValue(1); + CHECK_EQ(size_t(inputSeq.getNumSequences()), selectedIndices->getHeight()); + + if (dynamic_cast(selectedIndices.get())) { + /* + * Currently, the second input for this layer is generated by + * kmax_sequence_score_layer whose output is always stored on CPU, + * or a data_layer which canbe on GPU. + * + * If the second input is on GPU, copy it to CPU memory, because this + * input always uses very few memory, and operations related to it are + * all logic control, not computations. + */ + Matrix::resizeOrCreate(selIdsCpu_, + selectedIndices->getHeight(), + selectedIndices->getWidth(), + false /* trans */, + false /* useGpu */); + selIdsCpu_->copyFrom(*selectedIndices); + } else { + selIdsCpu_ = selectedIndices; + } + + Argument::reorganizeSeqInfo(inputSeq.sequenceStartPositions, + inputSeq.subSequenceStartPositions, + inputSeqInfoVec_); + calSelectedCols(selIdsCpu_, inputSeqInfoVec_); + + resetOutput(selectedRows_.size(), getSize()); + getOutputValue()->selectRows(*getInputValue(0), *rowIndice_); +} + +void SubNestedSequenceLayer::backward(const UpdateCallback& callback) { + MatrixPtr inputSeqGrad = getInputGrad(0); + MatrixPtr outputGrad = getOutputGrad(); + + if (inputSeqGrad) outputGrad->addToRows(*inputSeqGrad, *rowIndice_); +} + +} // namespace paddle diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index 5511ab6b8bb05108e76cc0913264d864d2fecf5b..209d0ab9c8d7e8463c8636b1412622a94f359fb1 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -66,6 +66,16 @@ add_unittest_without_exec(test_BatchNorm add_test(NAME test_BatchNorm COMMAND test_BatchNorm) + + +################# test_KmaxSeqScore ####################### +add_unittest_without_exec(test_KmaxSeqScore + test_KmaxSeqScore.cpp + LayerGradUtil.cpp) + +add_test(NAME test_KmaxSeqScore + COMMAND test_KmaxSeqScore) + ################## test_Evaluator ####################### add_unittest(test_Evaluator test_Evaluator.cpp) diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 83fcfed46cd568d22237eeef9c0215e4e3ad2666..659eefa31bdb1f2433d03a59d5bf4782c71bdecf 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -21,6 +21,8 @@ limitations under the License. 
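// [editorial aside, not part of this patch] calSelectedCols above expands per-sequence
// sub-sequence choices into flat row indices: for sequence i, a selected sub-sequence
// s covers rows [inputSeqInfo[i][s], inputSeqInfo[i][s + 1]) of the input batch, and
// -1 marks an unused beam slot. A small self-contained sketch of that expansion
// (hypothetical names, plain std containers instead of Paddle matrices):
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // per-sequence row offsets of the sub-sequences (same layout as inputSeqInfoVec_)
  std::vector<std::vector<int>> inputSeqInfo = {{0, 3, 4}, {4, 5, 7, 10, 15}};
  // chosen sub-sequences per sequence, padded with -1 up to the beam size
  std::vector<std::vector<int>> selected = {{0, 1, -1}, {2, -1, -1}};

  std::vector<int> rows;  // flat row indices gathered for the output
  for (size_t i = 0; i < selected.size(); ++i) {
    for (int s : selected[i]) {
      if (s == -1) break;  // remaining slots of this beam are unused
      for (int r = inputSeqInfo[i][s]; r < inputSeqInfo[i][s + 1]; ++r)
        rows.push_back(r);
    }
  }
  for (int r : rows) std::cout << r << " ";  // prints: 0 1 2 3 7 8 9
  std::cout << "\n";
  return 0;
}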
*/ #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" +#include "paddle/cuda/include/hl_batch_norm.h" +#include "paddle/math/tests/TensorCheck.h" #include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT @@ -117,6 +119,74 @@ TEST(Layer, batchNorm) { CHECK_EQ(static_cast(convLayer->getOutputValue()->getWidth()), 576); } +#ifndef PADDLE_ONLY_CPU +void batchNormInference(int n, int c, int h, int w) { + MatrixPtr input = std::make_shared(n, c * h * w); + MatrixPtr cudnnOut = std::make_shared(n, c * h * w); + MatrixPtr cudaOut = std::make_shared(n, c * h * w); + MatrixPtr cudnnCheck = std::make_shared(n, c * h * w); + MatrixPtr cudaCheck = std::make_shared(n, c * h * w); + input->randomizeUniform(); + cudnnOut->zeroMem(); + cudaOut->zeroMem(); + + MatrixPtr scale = std::make_shared(1, c); + scale->randomizeUniform(); + MatrixPtr bias = std::make_shared(1, c); + bias->randomizeUniform(); + + MatrixPtr movingMean = std::make_shared(1, c); + movingMean->randomizeUniform(); + + MatrixPtr movingVar = std::make_shared(1, c); + movingVar->randomizeUniform(); + movingVar->clip(0.01, 50); + + hl_tensor_descriptor ioDesc; + hl_tensor_descriptor bnDesc; + hl_create_tensor_descriptor(&ioDesc); + hl_create_tensor_descriptor(&bnDesc); + hl_tensor_reshape(ioDesc, n, c, h, w); + hl_tensor_reshape(bnDesc, 1, c, 1, 1); + + double EPS = 1E-5; + hl_batch_norm_forward_inference(ioDesc, + input->getData(), + ioDesc, + cudnnOut->getData(), + bnDesc, + scale->getData(), + bias->getData(), + movingMean->getData(), + movingVar->getData(), + EPS); + + hl_batch_norm_cuda_inference(input->getData(), + cudaOut->getData(), + scale->getData(), + bias->getData(), + movingMean->getData(), + movingVar->getData(), + EPS, + n, + c, + h, + w); + + cudnnCheck->copyFrom(*cudnnOut); + cudaCheck->copyFrom(*cudaOut); + autotest::TensorCheckErr(*cudnnCheck, *cudaCheck); + + hl_destroy_tensor_descriptor(ioDesc); + hl_destroy_tensor_descriptor(bnDesc); +} + +TEST(BatchNorm, Inference) { + batchNormInference(33, 267, 1, 1); + batchNormInference(19, 105, 4, 4); +} +#endif + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a51fe390c74d74cd5f3d07df62b715b239335548 --- /dev/null +++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp @@ -0,0 +1,160 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include +#include +#include "ModelConfig.pb.h" +#include "paddle/gserver/layers/DataLayer.h" +#include "paddle/trainer/Trainer.h" +#include "paddle/utils/GlobalConstants.h" + +#include "LayerGradUtil.h" +#include "paddle/testing/TestUtil.h" + +using namespace paddle; // NOLINT +using namespace std; // NOLINT + +DECLARE_bool(use_gpu); +DECLARE_int32(gpu_id); +DECLARE_bool(thread_local_rand_use_global_seed); + +vector randSampling(int range, int n) { + CHECK_GE(range, n); + vector num(range); + iota(begin(num), end(num), 0); + if (range == n) return num; + + random_shuffle(begin(num), end(num)); + num.resize(n); + return num; +} + +void genRandomSeqInfo(vector& seqStartPosition, + vector& subSeqStartPosition) { + const int maxSeqNum = 100; + // generate random start position information + int seqNum = 1 + (rand() % maxSeqNum); + seqStartPosition.resize(seqNum + 1, 0); + subSeqStartPosition.resize(1, 0); + + for (int i = 0; i < seqNum; ++i) { + int subSeqLen = 1 + (rand() % maxSeqNum); + for (int j = 0; j < subSeqLen; ++j) + subSeqStartPosition.push_back(subSeqStartPosition.back() + subSeqLen); + seqStartPosition[i + 1] = subSeqStartPosition.back(); + } +} + +void genRandomGroundTruth(real* values, + vector>& groundTruth, + vector& startPos, + size_t beamSize) { + groundTruth.resize(startPos.size() - 1, vector(beamSize, -1)); + for (size_t i = 0; i < startPos.size() - 1; ++i) { + int seqLen = startPos[i + 1] - startPos[i]; + vector pos = + randSampling(seqLen, min(static_cast(beamSize), seqLen)); + for (size_t j = 0; j < pos.size(); ++j) { + groundTruth[i][j] = pos[j]; + values[startPos[i] + pos[j]] = 1.; + } + } +} + +void checkLayerOut(vector> groundTruth, + real* layerOut, + size_t beamSize) { + for (size_t i = 0; i < groundTruth.size(); ++i) { + int begPos = i * beamSize; + vector tmp(layerOut + begPos, layerOut + begPos + beamSize); + sort(begin(tmp), end(tmp)); + sort(begin(groundTruth[i]), end(groundTruth[i])); + for (size_t j = 0; j < beamSize; ++j) CHECK_EQ(tmp[j], groundTruth[i][j]); + } +} + +TEST(Layer, kmaxSeqScoreLayer) { + const size_t maxBeamSize = 100; + size_t beamSize = 1 + (rand() % maxBeamSize); + + vector seqStartPosition; + vector subSeqStartPosition; + genRandomSeqInfo(seqStartPosition, subSeqStartPosition); + MatrixPtr inValue = + Matrix::create(subSeqStartPosition.back(), 1, false, false); + + for (auto hasSubseq : {false, true}) { + vector> groundTruth; + inValue->randomizeUniform(); + genRandomGroundTruth(inValue->getData(), + groundTruth, + hasSubseq ? 
subSeqStartPosition : seqStartPosition, + beamSize); + + for (auto useGpu : {false, true}) { + TestConfig config; + config.layerConfig.set_type("kmax_seq_score"); + config.layerConfig.set_beam_size(beamSize); + + if (hasSubseq) { + config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA, + "scores", + inValue, + seqStartPosition, + subSeqStartPosition}); + } else { + config.inputDefs.push_back( + {INPUT_SELF_DEFINE_DATA, "scores", inValue, seqStartPosition}); + } + config.layerConfig.add_inputs(); + + // data layer initialize + std::vector dataLayers; + LayerMap layerMap; + vector datas; + initDataLayer( + config, + &dataLayers, + &datas, + &layerMap, + "kmax_seq_score", + 100 /* actually this parameter is unused in self-defined input*/, + false, + useGpu); + // test layer initialize + std::vector parameters; + LayerPtr kmaxSeqScoreLayer; + FLAGS_use_gpu = useGpu; + initTestLayer(config, &layerMap, ¶meters, &kmaxSeqScoreLayer); + kmaxSeqScoreLayer->forward(PASS_TRAIN); + + const MatrixPtr outValue = kmaxSeqScoreLayer->getOutputValue(); + CHECK_EQ(outValue->getHeight(), + hasSubseq ? subSeqStartPosition.size() - 1 + : seqStartPosition.size() - 1); + CHECK_EQ(outValue->getWidth(), beamSize); + checkLayerOut(groundTruth, outValue->getData(), beamSize); + } + } +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + initMain(argc, argv); + FLAGS_thread_local_rand_use_global_seed = true; + srand((size_t)(time(NULL))); + return RUN_ALL_TESTS(); +} diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index fe11278f41c0118ee0bdb34f17fbf9602e0fa76b..0f312b6ca50bc1e6317251ba785f1c61a224b54e 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -1899,6 +1899,84 @@ TEST(Layer, CropLayer) { } } +vector randSampling(real range, int n) { + CHECK_GE(range, n); + vector num(range); + iota(begin(num), end(num), 0.); + if (range == n) return num; + + random_shuffle(begin(num), end(num)); + num.resize(n); + sort(begin(num), end(num)); + return num; +} + +TEST(Layer, SubNestedSequenceLayer) { + // layer size is not crutial for this layer, + // so use a small layer size in unittest + const int layerSize = 4; + + const int maxSeqNum = 50; + const int maxSeqLen = 50; + const int maxBeamSize = 32; + + srand((size_t)(time(NULL))); + int beamSize = 1 + (rand() % maxBeamSize); + + TestConfig config; + config.layerConfig.set_type("sub_nested_seq"); + config.layerConfig.set_name("sub_nested_seq_layer"); + config.layerConfig.set_size(layerSize); + + int seqNum = 1 + (rand() % maxSeqNum); + + // sequence information for the first input, it is a nested sequence + vector seqStartPos(seqNum + 1, 0); + vector subSeqStartPos(1, 0); + + // selected indices + MatrixPtr selectedIndices = Matrix::create(seqNum, beamSize, false, false); + selectedIndices->one(); + selectedIndices->mulScalar(-1.); + real* indicesData = selectedIndices->getData(); + + for (int i = 0; i < seqNum; ++i) { + int subSeqNum = 1 + (rand() % maxSeqNum); + for (int j = 0; j < subSeqNum; ++j) { + subSeqStartPos.push_back(subSeqStartPos.back() + + (1 + (rand() % maxSeqLen))); + } + vector selSeqs = + randSampling(static_cast(subSeqNum), min(beamSize, subSeqNum)); + memcpy(indicesData + (i * beamSize), + selSeqs.data(), + selSeqs.size() * sizeof(real)); + seqStartPos[i + 1] = subSeqStartPos.back(); + } + + MatrixPtr seqInputPtr = + Matrix::create(seqStartPos.back(), layerSize, false, false); + seqInputPtr->randomizeUniform(); + 
config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA, + "nested_seq_input", + seqInputPtr, + seqStartPos, + subSeqStartPos}); + config.layerConfig.add_inputs(); + config.inputDefs.push_back( + {INPUT_SELF_DEFINE_DATA, "selected_indices", selectedIndices}); + config.layerConfig.add_inputs(); + + for (auto useGpu : {false, true}) { + testLayerGrad(config, + "sub_nested_seq", + /* batchSize */ seqNum, + /* trans */ false, + /* useGpu*/ useGpu, + /* useWeight */ false); + } +} + TEST(Layer, ClipLayer) { const size_t batchSize = 128; const size_t size = 512; diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index db63706d7203718cdee661ebae8a9bb538323cd4..7085905c2a7094dfd676135166372986f2eb1ba8 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -61,6 +61,7 @@ op_library(cross_entropy_op SRCS cross_entropy_op.cc cross_entropy_op.cu) op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu) op_library(sgd_op SRCS sgd_op.cc sgd_op.cu) +cc_test(sgd_op_test SRCS sgd_op_test.cc DEPS sgd_op) op_library(fc_op SRCS fc_op.cc @@ -68,3 +69,5 @@ op_library(fc_op op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc DEPS op_desc tensor op_registry operator net_op) cc_test(recurrent_op_test SRCS recurrent_op_test.cc DEPS recurrent_op gtest mul_op add_op) +op_library(uniform_random_op + SRCS uniform_random_op.cc uniform_random_op.cu) diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc index d4c05ed483ca56a31dd8ee4d81b54551ae6da0d1..086245ef62d759ab20a3684ddbc015f6c6258639 100644 --- a/paddle/operators/add_op.cc +++ b/paddle/operators/add_op.cc @@ -17,13 +17,12 @@ limitations under the License. */ namespace paddle { namespace operators { -class AddOp : public OperatorWithKernel { +class AddOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { + void InferShape(const framework::InferShapeContext &ctx) const override { PADDLE_ENFORCE_EQ(ctx.InputSize(), 2); PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr, - "Inputs of AddOp must all be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "Inputs of AddOp must all be set"); PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, "Outputs of AddOp must all be set"); PADDLE_ENFORCE(ctx.Input(0)->dims() == ctx.Input(1)->dims(), @@ -32,9 +31,9 @@ class AddOp : public OperatorWithKernel { } }; -class AddOpMaker : public OpProtoAndCheckerMaker { +class AddOpMaker : public framework::OpProtoAndCheckerMaker { public: - AddOpMaker(OpProto *proto, OpAttrChecker *op_checker) + AddOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The first input of add op"); AddInput("Y", "The second input of add op"); @@ -47,14 +46,17 @@ The equation is: Out = X + Y } }; -class AddOpGrad : public OperatorWithKernel { +class AddOpGrad : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override {} + void InferShape(const framework::InferShapeContext &ctx) const override {} }; } // namespace operators } // namespace paddle +namespace ops = paddle::operators; REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker); REGISTER_GRADIENT_OP(add_two, add_two_grad, ops::AddOpGrad); -REGISTER_OP_CPU_KERNEL(add_two, ops::AddKernel); + +REGISTER_OP_CPU_KERNEL(add_two, + ops::AddKernel); diff --git a/paddle/operators/add_op.cu 
b/paddle/operators/add_op.cu index 9bd08634da96c5595d6dd702ad9afafb94632b03..cec5f558cbc161124620ad4241d6bd8a5324277c 100644 --- a/paddle/operators/add_op.cu +++ b/paddle/operators/add_op.cu @@ -16,4 +16,6 @@ #include "paddle/framework/op_registry.h" #include "paddle/operators/add_op.h" -REGISTER_OP_GPU_KERNEL(add_two, ops::AddKernel); +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(add_two, + ops::AddKernel); diff --git a/paddle/operators/add_op.h b/paddle/operators/add_op.h index 9db19a61381fdb11350276d51d3ebbf083672022..d76c10957e943deb970b1d79a1507a36669314e3 100644 --- a/paddle/operators/add_op.h +++ b/paddle/operators/add_op.h @@ -13,15 +13,21 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/type_alias.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; + template -class AddKernel : public OpKernel { +class AddKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext& context) const override { auto input0 = context.Input(0); auto input1 = context.Input(1); auto output = context.Output(0); diff --git a/paddle/operators/add_op_test.cc b/paddle/operators/add_op_test.cc index 3d52f5498323dbb7ca0ff25d038947f0ddb2017e..bf529defb20d27200a28666278db8607b986e2d5 100644 --- a/paddle/operators/add_op_test.cc +++ b/paddle/operators/add_op_test.cc @@ -14,9 +14,9 @@ limitations under the License. */ #include #define private public -#include +#include "paddle/framework/op_registry.h" + USE_OP(add_two); -// USE_OP(add_two_grad); TEST(AddOp, GetOpProto) { auto& protos = paddle::framework::OpRegistry::protos(); diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index b0e1b8e41a5320aa14e316a56dbfd01e43c6816b..c813d54e17fa48aa4447ef76b918b7355be52b09 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -17,28 +17,29 @@ limitations under the License. 
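In add_op.h above the Compute body is cut off right after the output allocation. The usual Eigen formulation, and the reason the kernel is templated on a Place, is to flatten all three tensors and assign the element-wise sum through the Eigen device, so the same code serves the CPU and GPU registrations in add_op.cc and add_op.cu. A stand-alone illustration of that idiom with plain Eigen (no Paddle types):

```
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // EigenVector<T>::Flatten(tensor) in the framework is a rank-1 view over
  // the tensor's data; the kernel then writes the sum through .device().
  Eigen::Tensor<float, 1> x(4), y(4), z(4);
  x.setValues({1, 2, 3, 4});
  y.setValues({10, 20, 30, 40});

  Eigen::DefaultDevice cpu;  // a GpuDevice works the same way under CUDA
  z.device(cpu) = x + y;

  for (int i = 0; i < 4; ++i) std::cout << z(i) << ' ';  // 11 22 33 44
  std::cout << '\n';
  return 0;
}
```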
*/ namespace paddle { namespace operators { -class OnehotCrossEntropyOp : public OperatorWithKernel { +class OnehotCrossEntropyOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 2, - "Input size of OnehotCrossEntropyOp must be two"); - PADDLE_ENFORCE(ctx.OutputSize() == 1, - "Output size of OnehotCrossEntropyOp must be one"); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr, - "Inputs of OnehotCrossEntropyOp must all be set"); - PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, - "Outputs of OnehotCrossEntropyOp must all be set"); - PADDLE_ENFORCE(ctx.Input(0)->dims().size() == 2, - "X's dimension must be 2."); - PADDLE_ENFORCE(ctx.Output(0)->dims().size() == 1, - "label's dimension must be 1."); + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, + "Input size of OnehotCrossEntropyOp must be two"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, + "Output size of OnehotCrossEntropyOp must be one"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), + "0-th input of OnehotCrossEntropyOp should be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(1), + "1-th input of OnehotCrossEntropyOp should be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), + "Outputs of OnehotCrossEntropyOp must all be set"); + PADDLE_ENFORCE_EQ(ctx.Input(0)->dims().size(), 2); + PADDLE_ENFORCE_EQ(ctx.Output(0)->dims().size(), 1, + "label's dimension must be 1."); ctx.Output(0)->Resize({ctx.Input(0)->dims()[0]}); } }; -class OnehotCrossEntropyGradientOp : public OperatorWithKernel { +class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { + void InferShape(const framework::InferShapeContext &ctx) const override { auto X_grad = ctx.Output(framework::GradVarName("X")); auto X = ctx.Input("X"); @@ -47,9 +48,10 @@ class OnehotCrossEntropyGradientOp : public OperatorWithKernel { } }; -class OnehotCrossEntropyOpMaker : public OpProtoAndCheckerMaker { +class OnehotCrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { public: - OnehotCrossEntropyOpMaker(OpProto *proto, OpAttrChecker *op_checker) + OnehotCrossEntropyOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The first input of OnehotCrossEntropyOp"); AddInput("label", "The second input of OnehotCrossEntropyOp"); @@ -65,11 +67,14 @@ OnehotCrossEntropy Operator. 
} // namespace operators } // namespace paddle +namespace ops = paddle::operators; REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp, ops::OnehotCrossEntropyOpMaker); -REGISTER_OP_CPU_KERNEL(onehot_cross_entropy, - ops::OnehotCrossEntropyOpKernel); - +REGISTER_OP_CPU_KERNEL( + onehot_cross_entropy, + ops::OnehotCrossEntropyOpKernel); +REGISTER_GRADIENT_OP(onehot_cross_entropy, onehot_cross_entropy_grad, + ops::OnehotCrossEntropyGradientOp); REGISTER_OP_CPU_KERNEL( onehot_cross_entropy_grad, - ops::OnehotCrossEntropyGradientOpKernel); + ops::OnehotCrossEntropyGradientOpKernel); diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 2f453f8379ca7ce0612fed757719acb2d2cf0ad8..4bbc8f093a794d46737a16488684a6a0cc25e285 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -15,5 +15,7 @@ #define EIGEN_USE_GPU #include "paddle/operators/cross_entropy_op.h" -REGISTER_OP_GPU_KERNEL(onehot_cross_entropy, - ops::OnehotCrossEntropyOpKernel); +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + onehot_cross_entropy, + ops::OnehotCrossEntropyOpKernel); diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h index 88d06e13469f8e6fc9e634d804c1fe0bed5e2d75..d1bbc2cb66d6ce84ddcdcb87648f23c6ce77b748 100644 --- a/paddle/operators/cross_entropy_op.h +++ b/paddle/operators/cross_entropy_op.h @@ -13,17 +13,36 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/type_alias.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { -static const float kCrossEntropyLogThreshold{1e-20}; +using Tensor = framework::Tensor; + +template +T tolerable_value(T x) { + static_assert(std::is_floating_point::value, + "tolerable_value works only on float, " + "double and double double."); + + const T kApproInf = 1e20; + + if (x == INFINITY) { + return kApproInf; + } + + if (x == -INFINITY) { + return -kApproInf; + } + + return x; +} template -class OnehotCrossEntropyOpKernel : public OpKernel { +class OnehotCrossEntropyOpKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& ctx) const override { + void Compute(const framework::ExecutionContext& ctx) const override { auto X = ctx.Input("X"); const T* Xdata = X->data(); const int* label_data = ctx.Input(1)->data(); @@ -36,18 +55,17 @@ class OnehotCrossEntropyOpKernel : public OpKernel { int batch_size = X->dims()[0]; int class_num = X->dims()[1]; - // Y[i] = -log(X[i][j]) for (int i = 0; i < batch_size; ++i) { - Ydata[i] = -std::log(std::max(Xdata[i * class_num + label_data[i]], - kCrossEntropyLogThreshold)); + int index = i * class_num + label_data[i]; + Ydata[i] = -tolerable_value(std::log(Xdata[index])); } } }; template -class OnehotCrossEntropyGradientOpKernel : public OpKernel { +class OnehotCrossEntropyGradientOpKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& ctx) const override { + void Compute(const framework::ExecutionContext& ctx) const override { auto X = ctx.Input("X"); auto dX = ctx.Output(framework::GradVarName("X")); auto dY = ctx.Input(framework::GradVarName("Y")); @@ -62,9 +80,8 @@ class OnehotCrossEntropyGradientOpKernel : public OpKernel { const int class_num = X->dims()[1]; for (int i = 0; i < batch_size; ++i) { - dXdata[i * class_num + label_data[i]] = - -dYdata[i] / std::max(Xdata[i * class_num + label_data[i]], - kCrossEntropyLogThreshold); + 
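The tolerable_value helper above replaces the old kCrossEntropyLogThreshold clamp: instead of clamping the probability before taking the log, the possibly infinite result of the log or division is clamped to plus or minus 1e20. A small stand-alone check of that behavior (the helper is copied here only for illustration):

```
#include <cassert>
#include <cmath>
#include <type_traits>

template <typename T>
T tolerable_value(T x) {
  static_assert(std::is_floating_point<T>::value,
                "tolerable_value works only on floating point types.");
  const T kApproInf = 1e20;
  if (x == INFINITY) return kApproInf;
  if (x == -INFINITY) return -kApproInf;
  return x;
}

int main() {
  // log(0) is -inf; the forward kernel computes -tolerable_value(log(p)),
  // so a zero probability yields the large but finite loss 1e20.
  assert(-tolerable_value(std::log(0.0)) == 1e20);
  // Ordinary values pass through unchanged.
  assert(tolerable_value(2.5) == 2.5);
  return 0;
}
```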
int index = i * class_num + label_data[i]; + dXdata[index] = -tolerable_value(dYdata[i] / Xdata[index]); } } }; diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index b5cf236bac6bb5abe061f7b4ad469d20e0af76a9..01a1a81206f160386467b3c789a41206d89576b6 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -12,11 +12,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "type_alias.h" +#include "paddle/operators/net_op.h" + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { +using OpRegistry = framework::OpRegistry; + class FullyConnectedOp : public NetOp { public: void Init() override { @@ -39,9 +44,10 @@ class FullyConnectedOp : public NetOp { } }; -class FullyConnectedOpMaker : public OpProtoAndCheckerMaker { +class FullyConnectedOpMaker : public framework::OpProtoAndCheckerMaker { public: - FullyConnectedOpMaker(OpProto *proto, OpAttrChecker *op_checker) + FullyConnectedOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "the input of fc operator"); AddInput("W", "the weight of fc operator"); @@ -66,4 +72,5 @@ USE_OP(rowwise_add); USE_OP(sigmoid); USE_OP(softmax); +namespace ops = paddle::operators; REGISTER_OP(fc, ops::FullyConnectedOp, ops::FullyConnectedOpMaker); diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index 3d37d64c5a8c288684122f3e686262399d32ed7b..3759a886780e555ccdc6286c4b200a5d14214691 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -13,8 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/fill_zeros_like_op.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/tensor.h" namespace paddle { namespace operators { @@ -22,14 +20,14 @@ namespace operators { class FillZerosLikeOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 1UL, - "Input size of FillZerosLikeOp must be one."); - PADDLE_ENFORCE(ctx.OutputSize() == 1UL, - "Output size of AddOp must be one."); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr, - "Input of FillZerosLikeOp must be set."); - PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, - "Output of FillZerosLikeOp must be set."); + PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL, + "Input size of FillZerosLikeOp must be one."); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL, + "Output size of AddOp must be one."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), + "Input of FillZerosLikeOp must be set."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), + "Output of FillZerosLikeOp must be set."); ctx.Output(0)->Resize( ctx.Input(0)->dims()); } @@ -52,8 +50,8 @@ The output will have the same size with input. 
} // namespace operators } // namespace paddle -REGISTER_OP(fill_zeros_like, paddle::operators::FillZerosLikeOp, - paddle::operators::FillZerosLikeOpMaker); +namespace ops = paddle::operators; +REGISTER_OP(fill_zeros_like, ops::FillZerosLikeOp, ops::FillZerosLikeOpMaker); REGISTER_OP_CPU_KERNEL( fill_zeros_like, - paddle::operators::FillZerosLikeKernel); + ops::FillZerosLikeKernel); diff --git a/paddle/operators/fill_zeros_like_op.cu b/paddle/operators/fill_zeros_like_op.cu index ed1068219c8fee8c6e8809f450a9d38c8226f317..fdbcf520a0d7b4ddfe3fc1837a21e0ce88b8e8fa 100644 --- a/paddle/operators/fill_zeros_like_op.cu +++ b/paddle/operators/fill_zeros_like_op.cu @@ -12,9 +12,11 @@ See the License for the specific language governing permissions and limitations under the License. */ +#define EIGEN_USE_GPU #include "paddle/framework/op_registry.h" #include "paddle/operators/fill_zeros_like_op.h" +namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL( fill_zeros_like, - paddle::operators::FillZerosLikeKernel); + ops::FillZerosLikeKernel); diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/operators/fill_zeros_like_op.h index 4bff1fbfc15af1f4d1ce9c99fe48b0b0f11b5b3f..f846c7a8ab15e2cd997564edb36660a1360227a8 100644 --- a/paddle/operators/fill_zeros_like_op.h +++ b/paddle/operators/fill_zeros_like_op.h @@ -13,9 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "glog/logging.h" #include "paddle/framework/eigen.h" -#include "paddle/framework/operator.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { @@ -26,7 +25,8 @@ class FillZerosLikeKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { auto* output = context.Output(0); output->mutable_data(context.GetPlace()); - framework::EigenVector::Flatten(*output).setZero(); + auto t = framework::EigenVector::Flatten(*output); + t.device(context.GetEigenDevice()) = t.constant(T(0)); } }; diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 8a4981c7be7587a0cc5f72cabe71e05702112ac3..997b0c514e96467dc9f9027829616c7b16fe43e1 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -17,20 +17,20 @@ limitations under the License. 
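FillZerosLikeKernel above now assigns t.constant(T(0)) through the Eigen device instead of calling setZero(), so the fill runs on whichever device the kernel is registered for (the .cu file defines EIGEN_USE_GPU for exactly this reason). The same idiom on the host, with plain Eigen:

```
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 1> t(5);
  t.setRandom();

  // Device-aware form: the right-hand side is evaluated by the given device.
  // With DefaultDevice this equals t.setConstant(0), but the identical line
  // also works with a GpuDevice when compiled with CUDA.
  Eigen::DefaultDevice cpu;
  t.device(cpu) = t.constant(0.0f);

  for (int i = 0; i < 5; ++i) std::cout << t(i) << ' ';  // five zeros
  std::cout << '\n';
  return 0;
}
```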
*/ namespace paddle { namespace operators { -class MeanOp : public OperatorWithKernel { +class MeanOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 1, "Input size of AddOp must be one"); - PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of AddOp must be one"); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.OutputVar(0) != nullptr, - "Input/Output of MeanOp must be initialized."); + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_EQ(ctx.InputSize(), 1, "Input size of AddOp must be one"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of AddOp must be one"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "input should be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), "output should be set"); ctx.Output(0)->Resize(framework::make_ddim({1})); } }; -class MeanOpMaker : public OpProtoAndCheckerMaker { +class MeanOpMaker : public framework::OpProtoAndCheckerMaker { public: - MeanOpMaker(OpProto *proto, OpAttrChecker *op_checker) + MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of mean op"); AddOutput("Out", "The output of mean op").IgnoreGradient(); @@ -38,9 +38,9 @@ class MeanOpMaker : public OpProtoAndCheckerMaker { } }; -class MeanGradOp : public OperatorWithKernel { +class MeanGradOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { + void InferShape(const framework::InferShapeContext &ctx) const override { ctx.Output("X" + framework::kGradVarSuffix) ->Resize(ctx.Input("X")->dims()); } @@ -49,7 +49,10 @@ class MeanGradOp : public OperatorWithKernel { } // namespace operators } // namespace paddle +namespace ops = paddle::operators; REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker); -REGISTER_OP_CPU_KERNEL(mean, ops::MeanKernel); +REGISTER_OP_CPU_KERNEL(mean, + ops::MeanKernel); REGISTER_GRADIENT_OP(mean, mean_grad, ops::MeanGradOp); -REGISTER_OP_CPU_KERNEL(mean_grad, ops::MeanGradKernel); +REGISTER_OP_CPU_KERNEL(mean_grad, + ops::MeanGradKernel); diff --git a/paddle/operators/mean_op.cu b/paddle/operators/mean_op.cu index 8b97b0154ccdc8c41a90f7580af829c5c8663b60..7af624d81dc5ffbb5c31b4d6f6eb8f9f8652a431 100644 --- a/paddle/operators/mean_op.cu +++ b/paddle/operators/mean_op.cu @@ -16,5 +16,8 @@ #include "paddle/operators/mean_op.h" -REGISTER_OP_GPU_KERNEL(mean, ops::MeanKernel); -REGISTER_OP_GPU_KERNEL(mean_grad, ops::MeanGradKernel); +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(mean, + ops::MeanKernel); +REGISTER_OP_GPU_KERNEL(mean_grad, + ops::MeanGradKernel); diff --git a/paddle/operators/mean_op.h b/paddle/operators/mean_op.h index 40a1e2d099acad90b1bbac50f62ea7c4f691c1b4..f3db0a29bb234948d180d964fb82057632ec4414 100644 --- a/paddle/operators/mean_op.h +++ b/paddle/operators/mean_op.h @@ -13,15 +13,24 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once -#include "paddle/operators/type_alias.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { +using Tensor = framework::Tensor; +template +using EigenScalar = framework::EigenScalar; +template +using EigenVector = framework::EigenVector; + template -class MeanKernel : public OpKernel { +class MeanKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext& context) const override { auto input = context.Input(0); auto output = context.Output(0); @@ -36,9 +45,9 @@ class MeanKernel : public OpKernel { }; template -class MeanGradKernel : public OpKernel { +class MeanGradKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext& context) const override { auto OG = context.Input("Out" + framework::kGradVarSuffix); PADDLE_ENFORCE(framework::product(OG->dims()) == 1, "Mean Gradient should be scalar"); diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 90761f3257b615a4af8b998de17037cc6de9f247..2b78872a30f6ef6793bc6c8fe3908aed41d089e6 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -18,9 +18,9 @@ namespace paddle { namespace operators { -class MulOp : public OperatorWithKernel { +class MulOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { + void InferShape(const framework::InferShapeContext &ctx) const override { PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs"); auto dim0 = ctx.Input(0)->dims(); auto dim1 = ctx.Input(1)->dims(); @@ -38,9 +38,9 @@ class MulOp : public OperatorWithKernel { } }; -class MulOpMaker : public OpProtoAndCheckerMaker { +class MulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) + MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The first input of mul op"); AddInput("Y", "The second input of mul op"); @@ -53,9 +53,9 @@ The equation is: Out = X * Y } }; -class MulOpGrad : public OperatorWithKernel { +class MulOpGrad : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override {} + void InferShape(const framework::InferShapeContext &ctx) const override {} std::string DebugString() const override { LOG(INFO) << "MulGrad"; return ""; @@ -65,7 +65,8 @@ class MulOpGrad : public OperatorWithKernel { } // namespace operators } // namespace paddle +namespace ops = paddle::operators; REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker); REGISTER_GRADIENT_OP(mul, mul_grad, ops::MulOpGrad); -REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); +REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); diff --git a/paddle/operators/mul_op.cu b/paddle/operators/mul_op.cu index 4a4a68bedca25504979317d8bbc9325d5e1dbcb2..7435b74bd87a392a27c1ae7f4a8d2416526a40d0 100644 --- a/paddle/operators/mul_op.cu +++ b/paddle/operators/mul_op.cu @@ -15,5 +15,4 @@ #define EIGEN_USE_GPU #include "paddle/operators/mul_op.h" - -REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel); +REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel); diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h index 2eb3f347a20c74fea13b3875b6cb3fb86cfde38d..2087e98901d69574e36adafe2a030abf2305c97b 100644 --- a/paddle/operators/mul_op.h 
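MeanKernel in mean_op.h above is also truncated in this hunk. The EigenScalar alias it introduces is a rank-0 (scalar) tensor view, which is exactly what a full reduction such as .mean() over the flattened input produces. A stand-alone sketch of that reduction:

```
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // Stand-in for the flattened input tensor of the mean op.
  Eigen::Tensor<float, 1> x(4);
  x.setValues({1.f, 2.f, 3.f, 4.f});

  // A full .mean() reduction yields a rank-0 tensor, the Eigen counterpart
  // of the framework's EigenScalar view of the output.
  Eigen::Tensor<float, 0> out = x.mean();
  std::cout << out() << '\n';  // 2.5
  return 0;
}
```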
+++ b/paddle/operators/mul_op.h @@ -15,18 +15,28 @@ #pragma once #include "paddle/operators/math/math_function.h" -#include "paddle/operators/type_alias.h" + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { +using Tensor = framework::Tensor; +template +using EigenMatrix = framework::EigenMatrix; + template -class MulKernel : public OpKernel { +class MulKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { - auto* input0 = context.Input("X"); - auto* input1 = context.Input("Y"); - auto* output = context.Output(0); + void Compute(const framework::ExecutionContext& context) const override { + Eigen::array, 1> dim_pair = { + {Eigen::IndexPair(1, 0)}}; + + auto input0 = context.Input("X"); + auto input1 = context.Input("Y"); + auto output = context.Output(0); output->mutable_data(context.GetPlace()); diff --git a/paddle/operators/net_op.cc b/paddle/operators/net_op.cc index fbc98e09923bda7f3baee04e02df9076247bff0b..a466c4f30fe87db4ad2a44518e083b57f3cbc2ed 100644 --- a/paddle/operators/net_op.cc +++ b/paddle/operators/net_op.cc @@ -15,7 +15,6 @@ */ #include "paddle/operators/net_op.h" -#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index 6e7af7f02ae23ec65459dfd15d950a43e96fec4d..792b336675fc97659d9a23358cf3d48ede56e54e 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -14,13 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_desc.pb.h" -#include "paddle/framework/op_proto.pb.h" #include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/scope.h" -#include "paddle/operators/type_alias.h" -#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -65,20 +59,29 @@ class NetOp : public framework::OperatorBase { } } + bool SupportGPU() const override { + for (auto& op : ops_) { + if (!op->SupportGPU()) { + return false; + } + } + return true; + } + /** * @brief Add an operator by ptr */ void AddOp(const std::shared_ptr& op) { PADDLE_ENFORCE(!add_op_done_, "Cannot AddOp when this network is sealed"); - PADDLE_ENFORCE(op != nullptr, "Cannot Insert Null op"); + PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); ops_.push_back(op); } void InsertOp(size_t pos, const std::shared_ptr& op) { PADDLE_ENFORCE(!add_op_done_, "Cannot InsertOp when this network is sealed"); - PADDLE_ENFORCE(op != nullptr, "Cannot Insert Null op"); - PADDLE_ENFORCE(pos <= ops_.size(), "Out of range"); + PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); + PADDLE_ENFORCE_LE(pos, ops_.size(), "Out of range"); ops_.insert(ops_.begin() + pos, op); } diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index c0a345464a34329d42c7bf753ca94fd07195b8e0..76bf79f9b51fd759da2d02cd90fa458a32be4178 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -2,31 +2,27 @@ #include -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" - namespace paddle { namespace operators { +using Scope = framework::Scope; +using DeviceContext = platform::DeviceContext; static int infer_shape_cnt = 0; static int run_cnt = 0; -class TestOp : public OperatorBase { +class TestOp : public framework::OperatorBase { public: - void InferShape(const framework::Scope& scope) const override { - ++infer_shape_cnt; - } - 
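The dim_pair array in MulKernel above (its template arguments are mangled in this extract; it is an Eigen::array holding one Eigen::IndexPair) tells contract() to sum axis 1 of X against axis 0 of Y, which is an ordinary matrix product. A small self-contained check:

```
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> X(2, 3), Y(3, 2);
  X.setValues({{1, 2, 3}, {4, 5, 6}});
  Y.setValues({{1, 0}, {0, 1}, {1, 1}});

  // Contract axis 1 of X with axis 0 of Y: a (2x3)*(3x2) matmul.
  Eigen::array<Eigen::IndexPair<int>, 1> dim_pair = {
      {Eigen::IndexPair<int>(1, 0)}};
  Eigen::Tensor<float, 2> Z = X.contract(Y, dim_pair);

  std::cout << Z(0, 0) << ' ' << Z(0, 1) << '\n';  // 4 5
  std::cout << Z(1, 0) << ' ' << Z(1, 1) << '\n';  // 10 11
  return 0;
}
```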
void Run(const framework::Scope& scope, - const paddle::platform::DeviceContext& dev_ctx) const override { + void InferShape(const Scope& scope) const override { ++infer_shape_cnt; } + void Run(const Scope& scope, + const platform::DeviceContext& dev_ctx) const override { ++run_cnt; } }; -class EmptyOp : public OperatorBase { +class EmptyOp : public framework::OperatorBase { public: void InferShape(const Scope& scope) const override {} - void Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const override {} + void Run(const Scope& scope, const DeviceContext& dev_ctx) const override {} }; template @@ -72,7 +68,7 @@ TEST(OpKernel, all) { net->Run(scope, dev_ctx); ASSERT_EQ(2, infer_shape_cnt); ASSERT_EQ(2, run_cnt); - ASSERT_THROW(net->AddOp(op2), paddle::platform::EnforceNotMet); + ASSERT_THROW(net->AddOp(op2), platform::EnforceNotMet); } TEST(NetOp, insert_op) { diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 5e9c15ca0e6a7c56611a0fadda6c3c0839f309e6..243837420562634c3d99fd0acf234ebd53539735 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -14,17 +14,19 @@ #include "paddle/operators/recurrent_op.h" -#include #include #include #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" -#include "paddle/platform/enforce.h" namespace paddle { namespace operators { +using Scope = framework::Scope; +using Variable = framework::Variable; +using Tensor = framework::Tensor; + void RecurrentAlgorithm::InferShape(const Scope& scope) const { seq_len_ = scope.FindVar((arg_->inlinks[0]).external) ->GetMutable() @@ -135,10 +137,11 @@ void RecurrentOp::Init() { alg_.Init(std::move(arg)); } -class RecurrentAlgorithmProtoAndCheckerMaker : public OpProtoAndCheckerMaker { +class RecurrentAlgorithmProtoAndCheckerMaker + : public framework::OpProtoAndCheckerMaker { public: - RecurrentAlgorithmProtoAndCheckerMaker(OpProto* proto, - OpAttrChecker* op_checker) + RecurrentAlgorithmProtoAndCheckerMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { const auto& name = RecurrentOp::kArgName; // inputs and outputs stored in proto diff --git a/paddle/operators/recurrent_op_test.cc b/paddle/operators/recurrent_op_test.cc index 3607d14bf875dc2892fbbdc4dbc9ccf87c1b9784..6ce28a2b52e3b90596a68714bfdbc07d2d4814d5 100644 --- a/paddle/operators/recurrent_op_test.cc +++ b/paddle/operators/recurrent_op_test.cc @@ -27,6 +27,10 @@ namespace operators { using framework::make_ddim; using framework::DDim; +using framework::Tensor; +using framework::Variable; +using framework::Scope; +using framework::OpRegistry; class RecurrentOpTest : public ::testing::Test { protected: @@ -164,7 +168,7 @@ class RecurrentOpTest : public ::testing::Test { // father scope Scope scope_; - std::shared_ptr rnn_op_; + std::shared_ptr rnn_op_; }; TEST_F(RecurrentOpTest, Run) { diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index 32c6c2dd4efa85359b4e95471e8ba09e56afec57..7e4770630ed2a49214194689aa489e6ab8e476da 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -18,7 +18,9 @@ namespace paddle { namespace operators { namespace rnn { -namespace fmw = paddle::framework; +namespace f = paddle::framework; + +using Tensor = framework::Tensor; void SegmentInputs(const std::vector& step_scopes, const std::vector& inlinks, const size_t seq_len, @@ -30,10 +32,10 @@ void 
SegmentInputs(const std::vector& step_scopes, inlinks[i].external); Tensor* input = input_var->GetMutable(); - fmw::DDim dims = input->dims(); + f::DDim dims = input->dims(); PADDLE_ENFORCE(static_cast(dims[0]) == seq_len, "all the inlinks must have same length"); - fmw::DDim step_dims = slice_ddim(dims, 1, dims.size()); + f::DDim step_dims = slice_ddim(dims, 1, dims.size()); for (size_t j = 0; j < seq_len; j++) { Tensor* step_input = step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable(); @@ -58,11 +60,10 @@ void ConcatOutputs(const std::vector& step_scopes, auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal); PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope", outlinks[i].internal); - fmw::DDim step_dims = - step_scope_var->template GetMutable()->dims(); + f::DDim step_dims = step_scope_var->template GetMutable()->dims(); std::vector dims_vec = vectorize(step_dims); dims_vec.insert(dims_vec.begin(), seq_len); - output->Resize(fmw::make_ddim(dims_vec)); + output->Resize(f::make_ddim(dims_vec)); } else { output->mutable_data(platform::CPUPlace()); for (size_t j = 0; j < seq_len; j++) { @@ -104,7 +105,7 @@ void LinkMemories(const std::vector& scopes, } void InitArgument(const ArgumentName& name, Argument* arg, - const OperatorBase& op) { + const framework::OperatorBase& op) { arg->step_net = op.Input(name.step_net); arg->step_scopes = op.Output(name.step_scopes); diff --git a/paddle/operators/rnn/recurrent_op_utils.h b/paddle/operators/rnn/recurrent_op_utils.h index 379754b98fcead6debe0a60efa62fce4b7761940..17941c503cfcc83415b8bc635623a2c2ce2981c3 100644 --- a/paddle/operators/rnn/recurrent_op_utils.h +++ b/paddle/operators/rnn/recurrent_op_utils.h @@ -17,12 +17,13 @@ #include #include "paddle/framework/operator.h" -#include "paddle/operators/type_alias.h" namespace paddle { namespace operators { namespace rnn { +using Scope = framework::Scope; + /** * Memory of a RNN (same as the role of `Momory` in PaddlePaddle). * @@ -86,7 +87,7 @@ void LinkMemories(const std::vector& step_scopes, const int offset, bool infer_shape_mode); void InitArgument(const ArgumentName& name, Argument* arg, - const OperatorBase& op); + const framework::OperatorBase& op); } // namespace rnn } // namespace operators diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc index 8d1a36f2b332faad516ced012a409ca428bbf689..55ed1c2f4c316656de94b24dd95b053a89d7e74e 100644 --- a/paddle/operators/rowwise_add_op.cc +++ b/paddle/operators/rowwise_add_op.cc @@ -13,12 +13,13 @@ limitations under the License. 
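The fmw-to-f rename above is cosmetic; the interesting part of SegmentInputs is the shape handling: a whole-sequence input of shape [seq_len, d1, d2, ...] is split into seq_len per-step tensors whose shape drops the leading dimension (that is what slice_ddim(dims, 1, dims.size()) computes), and ConcatOutputs re-inserts seq_len at the front when stitching step outputs back together. The bookkeeping, sketched with plain vectors instead of the framework's DDim type:

```
#include <cassert>
#include <vector>

int main() {
  // Shape of one whole-sequence input: [seq_len, d1, d2].
  std::vector<int> dims = {10, 32, 256};

  // What slice_ddim(dims, 1, dims.size()) yields: the per-step shape.
  std::vector<int> step_dims(dims.begin() + 1, dims.end());
  assert((step_dims == std::vector<int>{32, 256}));

  // What ConcatOutputs does in reverse: put seq_len back in front.
  std::vector<int> concat_dims = step_dims;
  concat_dims.insert(concat_dims.begin(), 10);
  assert(concat_dims == dims);
  return 0;
}
```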
*/ #include "paddle/operators/rowwise_add_op.h" + namespace paddle { namespace operators { -class RowWiseAddOp : public OperatorWithKernel { +class RowWiseAddOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { + void InferShape(const framework::InferShapeContext &ctx) const override { PADDLE_ENFORCE(ctx.InputSize() == 2UL, "Two inputs is needed by rowwise add"); auto dim0 = ctx.Input(0)->dims(); @@ -32,9 +33,10 @@ class RowWiseAddOp : public OperatorWithKernel { } }; -class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { +class RowWiseAddOpMaker : public framework::OpProtoAndCheckerMaker { public: - RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) + RowWiseAddOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The left input of row-wise add op, must be matrix"); AddInput("b", "The right input of row-wise add op, must be vector"); @@ -50,6 +52,7 @@ for i in xrange(X.shape[0]): } // namespace operators } // namespace paddle +namespace ops = paddle::operators; REGISTER_OP(rowwise_add, ops::RowWiseAddOp, ops::RowWiseAddOpMaker); -REGISTER_OP_CPU_KERNEL(rowwise_add, - ops::RowWiseAddKernel); +REGISTER_OP_CPU_KERNEL( + rowwise_add, ops::RowWiseAddKernel); diff --git a/paddle/operators/rowwise_add_op.cu b/paddle/operators/rowwise_add_op.cu index f76faa0a3a93a1ac277a1d1aa83c3fa6c3944648..86f80b81228a69ac4c05a4693901570f2b9966e0 100644 --- a/paddle/operators/rowwise_add_op.cu +++ b/paddle/operators/rowwise_add_op.cu @@ -15,5 +15,6 @@ #define EIGEN_USE_GPU #include "paddle/operators/rowwise_add_op.h" -REGISTER_OP_GPU_KERNEL(rowwise_add, - ops::RowWiseAddKernel); +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + rowwise_add, ops::RowWiseAddKernel); diff --git a/paddle/operators/rowwise_add_op.h b/paddle/operators/rowwise_add_op.h index b52524c47c7b80d8ddc6a94a4a6d03db8034088d..82e9d70e959441869b958c1241fa5f5beef4c50c 100644 --- a/paddle/operators/rowwise_add_op.h +++ b/paddle/operators/rowwise_add_op.h @@ -13,15 +13,24 @@ limitations under the License. */ #pragma once -#include "paddle/operators/type_alias.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; +template +using EigenMatrix = framework::EigenMatrix; + template -class RowWiseAddKernel : public OpKernel { +class RowWiseAddKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext& context) const override { auto out = context.Output(0); out->mutable_data(context.GetPlace()); diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 6307583f4ee3f185845690d0e378945d066eae75..f9a28ff8a6a06c5c239c4e6ec21eacb410cc162f 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -17,23 +17,23 @@ limitations under the License. 
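The RowWiseAddKernel body is elided above; per the maker's comment, the op adds the bias vector b to every row of the matrix X. A loop-level sketch of that semantics (the helper name is illustrative; this is not the Eigen implementation):

```
#include <iostream>
#include <vector>

// Out[i][j] = X[i][j] + b[j]: the row-wise broadcast add the op performs.
std::vector<std::vector<float>> rowwiseAdd(
    const std::vector<std::vector<float>>& X, const std::vector<float>& b) {
  std::vector<std::vector<float>> out = X;
  for (auto& row : out)
    for (size_t j = 0; j < b.size(); ++j) row[j] += b[j];
  return out;
}

int main() {
  auto out = rowwiseAdd({{1, 2}, {3, 4}}, {10, 20});
  std::cout << out[0][0] << ' ' << out[0][1] << '\n';  // 11 22
  std::cout << out[1][0] << ' ' << out[1][1] << '\n';  // 13 24
  return 0;
}
```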
*/ namespace paddle { namespace operators { -class SGDOp : public OperatorWithKernel { +class SGDOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 2, "Input size of SGDOp must be two"); - PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of SGDOp must be one"); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr, "inputs[0] mast be set"); - PADDLE_ENFORCE(ctx.InputVar(1) != nullptr, "inputs[1] mast be set"); - PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, "outputs[0] mast be set"); + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, "Input size of SGDOp must be two"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of SGDOp must be one"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "inputs[0] mast be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(1), "inputs[1] mast be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), "outputs[0] mast be set"); PADDLE_ENFORCE(ctx.Input(0)->dims() == ctx.Input(1)->dims(), "Two input of SGD Op's dimension must be same."); ctx.Output(0)->Resize(ctx.Input(0)->dims()); } }; -class SGDOpMaker : public OpProtoAndCheckerMaker { +class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: - SGDOpMaker(OpProto *proto, OpAttrChecker *op_checker) + SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("param", "input parameter"); AddInput("grad", "input gradient"); @@ -51,5 +51,7 @@ param_out = param - learning_rate * grad; } // namespace operators } // namespace paddle +namespace ops = paddle::operators; REGISTER_OP(sgd, ops::SGDOp, ops::SGDOpMaker); -REGISTER_OP_CPU_KERNEL(sgd, ops::SGDOpKernel); +REGISTER_OP_CPU_KERNEL(sgd, + ops::SGDOpKernel); diff --git a/paddle/operators/sgd_op.cu b/paddle/operators/sgd_op.cu index 72629ccfbb8bc8ec53045289bd985c721c62fa10..f5ba6d3c29f8dfbfdea4fbf2c3d5fd7f5b358666 100644 --- a/paddle/operators/sgd_op.cu +++ b/paddle/operators/sgd_op.cu @@ -15,4 +15,6 @@ #define EIGEN_USE_GPU #include "paddle/operators/sgd_op.h" -REGISTER_OP_GPU_KERNEL(sgd, ops::SGDOpKernel); +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(sgd, + ops::SGDOpKernel); diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index bf5b195933fce7faa46bcc96032e784076178cf7..bfb449d0b029409eda4177fc7643810ee6a1df3d 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -13,15 +13,21 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/type_alias.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; + template -class SGDOpKernel : public OpKernel { +class SGDOpKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& ctx) const override { + void Compute(const framework::ExecutionContext& ctx) const override { auto param = ctx.Input("param"); auto grad = ctx.Input("grad"); auto param_out = ctx.Output(0); diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc index 1eb795faa858796f7a34aa495b43d043fdb5dd43..bc5e0bbb183f9bdf0a3fa8a5a02499282fbd6b98 100644 --- a/paddle/operators/sigmoid_op.cc +++ b/paddle/operators/sigmoid_op.cc @@ -13,21 +13,23 @@ limitations under the License. 
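The maker above documents the update rule as param_out = param - learning_rate * grad, applied element-wise over the flattened tensors. The same expression with plain Eigen tensors (the learning rate is hard-coded here only for the demonstration):

```
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 1> param(3), grad(3), param_out(3);
  param.setValues({1.0f, 2.0f, 3.0f});
  grad.setValues({0.5f, 0.5f, 0.5f});
  const float learning_rate = 0.1f;

  // param_out = param - learning_rate * grad, much as the SGD kernel does
  // on its flattened inputs.
  param_out = param - grad * learning_rate;

  for (int i = 0; i < 3; ++i) std::cout << param_out(i) << ' ';
  std::cout << '\n';  // 0.95 1.95 2.95
  return 0;
}
```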
*/ #include "paddle/operators/sigmoid_op.h" + namespace paddle { namespace operators { -class SigmoidOp : public OperatorWithKernel { +class SigmoidOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { + void InferShape(const framework::InferShapeContext &ctx) const override { PADDLE_ENFORCE(ctx.InputSize() == 1, "Sigmoid Op only have one input"); PADDLE_ENFORCE(ctx.OutputSize() == 1, "Sigmoid Op only have one output"); ctx.Output(0)->Resize(ctx.Input(0)->dims()); } }; -class SigmoidOpMaker : public OpProtoAndCheckerMaker { +class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker { public: - SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) + SigmoidOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "sigmoid input"); AddOutput("Y", "sigmoid output"); @@ -35,9 +37,9 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker { } }; -class SigmoidOpGrad : public OperatorWithKernel { +class SigmoidOpGrad : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { + void InferShape(const framework::InferShapeContext &ctx) const override { ctx.Output(0)->Resize(ctx.Input(0)->dims()); } }; @@ -45,9 +47,11 @@ class SigmoidOpGrad : public OperatorWithKernel { } // namespace operators } // namespace paddle +namespace ops = paddle::operators; REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker); REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad); -REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel); -REGISTER_OP_CPU_KERNEL(sigmoid_grad, - ops::SigmoidGradKernel); +REGISTER_OP_CPU_KERNEL(sigmoid, + ops::SigmoidKernel); +REGISTER_OP_CPU_KERNEL( + sigmoid_grad, ops::SigmoidGradKernel); diff --git a/paddle/operators/sigmoid_op.cu b/paddle/operators/sigmoid_op.cu index e80ba081f2ff805664cf92f3cb47e9ad51889058..1a50dfe14a7b9e2614aadb7729de9f9e461e9905 100644 --- a/paddle/operators/sigmoid_op.cu +++ b/paddle/operators/sigmoid_op.cu @@ -15,6 +15,9 @@ #define EIGEN_USE_GPU #include "paddle/operators/sigmoid_op.h" -REGISTER_OP_GPU_KERNEL(sigmoid, ops::SigmoidKernel); -REGISTER_OP_GPU_KERNEL(sigmoid_grad, - ops::SigmoidGradKernel); +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL(sigmoid, + ops::SigmoidKernel); +REGISTER_OP_GPU_KERNEL( + sigmoid_grad, ops::SigmoidGradKernel); diff --git a/paddle/operators/sigmoid_op.h b/paddle/operators/sigmoid_op.h index d513261e74423ce93a50eaaaec1c7d5fadb8f4a8..7af879b2091e4a7f80a3a64be029394156650c23 100644 --- a/paddle/operators/sigmoid_op.h +++ b/paddle/operators/sigmoid_op.h @@ -13,16 +13,21 @@ limitations under the License. 
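SigmoidOpGrad above only propagates the shape; the element-wise math lives in the kernels in sigmoid_op.h that follow. Forward is y = 1 / (1 + exp(-x)); given an upstream gradient dy, the standard sigmoid gradient expressed in terms of y is dx = dy * y * (1 - y), which is the formula the grad kernel is expected to apply element-wise. A quick numeric check of both:

```
#include <cmath>
#include <iostream>

int main() {
  // Forward: y = 1 / (1 + exp(-x)).
  double x = 0.5;
  double y = 1.0 / (1.0 + std::exp(-x));

  // Backward: dx = dy * y * (1 - y), using only the forward output y.
  double dy = 1.0;
  double dx = dy * y * (1.0 - y);

  std::cout << "y  = " << y << '\n';   // ~0.6225
  std::cout << "dx = " << dx << '\n';  // ~0.2350
  return 0;
}
```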
*/ #pragma once - -#include "paddle/operators/type_alias.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; + template -class SigmoidKernel : public OpKernel { +class SigmoidKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext& context) const override { auto input = context.Input(0); auto output = context.Output(0); output->mutable_data(context.GetPlace()); @@ -37,9 +42,9 @@ class SigmoidKernel : public OpKernel { }; template -class SigmoidGradKernel : public OpKernel { +class SigmoidGradKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext& context) const override { auto Y_t = context.Input("Y"); auto dY_t = context.Input(framework::GradVarName("Y")); auto dX_t = context.Output(framework::GradVarName("X")); diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index a070458f5e55cf47253ab0df5af7a1163b4f8092..3dd4e86918a86f408e7867d15b4fdc8f9cbbb5ce 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -17,22 +17,23 @@ limitations under the License. */ namespace paddle { namespace operators { -class SoftmaxOp : public OperatorWithKernel { +class SoftmaxOp : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 1UL, - "Only one input is need for softmax"); - PADDLE_ENFORCE(ctx.Input("X")->dims().size() == 2UL, - "The input of softmax op must be matrix"); - PADDLE_ENFORCE(ctx.OutputSize() == 1UL, - "Only one output is need for softmax"); + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL, + "Only one input is need for softmax"); + PADDLE_ENFORCE_EQ(ctx.Input("X")->dims().size(), 2UL, + "The input of softmax op must be matrix"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL, + "Only one output is need for softmax"); ctx.Output("Y")->Resize(ctx.Input("X")->dims()); } }; -class SoftmaxOpMaker : public OpProtoAndCheckerMaker { +class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftmaxOpMaker(OpProto *proto, OpAttrChecker *op_checker) + SoftmaxOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "input of softmax"); AddOutput("Y", "output of softmax"); @@ -40,16 +41,16 @@ class SoftmaxOpMaker : public OpProtoAndCheckerMaker { } }; -class SoftmaxOpGrad : public OperatorWithKernel { +class SoftmaxOpGrad : public framework::OperatorWithKernel { protected: - void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 3UL, - "Input of SoftmaxOpGrad should be 3, X, Y, YG"); - PADDLE_ENFORCE(ctx.OutputSize() == 1UL, - "Output of SoftmaxOpGrad should be 1"); - PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null"); - PADDLE_ENFORCE(ctx.InputVar(framework::GradVarName("Y")) != nullptr, - "Input(Y@GRAD) should not be null"); + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_EQ(ctx.InputSize(), 3UL, + "Input of SoftmaxOpGrad should be 3, X, Y, YG"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL, + "Output of SoftmaxOpGrad should 
be 1"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")), + "Input(Y@GRAD) should not be null"); PADDLE_ENFORCE(ctx.Input("Y")->dims() == ctx.Input(framework::GradVarName("Y"))->dims(), "the shape of Input(0) and Input(1) should be the same"); @@ -61,8 +62,11 @@ class SoftmaxOpGrad : public OperatorWithKernel { } // namespace operators } // namespace paddle +namespace ops = paddle::operators; + REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker); -REGISTER_OP_CPU_KERNEL(softmax, ops::SoftmaxKernel); +REGISTER_OP_CPU_KERNEL(softmax, + ops::SoftmaxKernel); REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad); -REGISTER_OP_CPU_KERNEL(softmax_grad, - ops::SoftmaxGradKernel); +REGISTER_OP_CPU_KERNEL( + softmax_grad, ops::SoftmaxGradKernel); diff --git a/paddle/operators/softmax_op.cu b/paddle/operators/softmax_op.cu index b79228580a7ea0f70b62eb2dc7a61cf85bc0b5fb..2e99a89699dbdcafc8055c47debf9e49f10507e6 100644 --- a/paddle/operators/softmax_op.cu +++ b/paddle/operators/softmax_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,9 +13,11 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/framework/op_registry.h" #include "paddle/operators/softmax_op.h" -REGISTER_OP_GPU_KERNEL(softmax, ops::SoftmaxKernel); -REGISTER_OP_GPU_KERNEL(softmax_grad, - ops::SoftmaxGradKernel); +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL(softmax, + ops::SoftmaxKernel); +REGISTER_OP_GPU_KERNEL( + softmax_grad, ops::SoftmaxGradKernel); diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h index b2dbcf57edf1a64da8da0d9a4c14d708eec17f3f..4fa6b59540498638c3b7df639ae10a66c0fa1c16 100644 --- a/paddle/operators/softmax_op.h +++ b/paddle/operators/softmax_op.h @@ -13,19 +13,21 @@ See the License for the specific language governing permissions and limitations under the License. 
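Both registrations above instantiate SoftmaxKernel; its Eigen body is largely elided in this diff, but the computation is the usual numerically stable softmax: subtract each row's maximum before exponentiating, then normalize. A minimal single-row sketch:

```
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

int main() {
  // Subtracting the row max avoids overflow in exp() without changing the
  // result, since softmax is invariant to shifting all logits equally.
  std::vector<double> logits = {2.0, 1.0, 0.1};
  double max_logit = *std::max_element(logits.begin(), logits.end());

  std::vector<double> probs(logits.size());
  double sum = 0.0;
  for (size_t i = 0; i < logits.size(); ++i) {
    probs[i] = std::exp(logits[i] - max_logit);
    sum += probs[i];
  }
  for (double& p : probs) p /= sum;

  for (double p : probs) std::cout << p << ' ';
  std::cout << '\n';  // ~0.659 0.242 0.099
  return 0;
}
```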
*/ #pragma once - -#include "paddle/framework/ddim.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/tensor.h" -#include "paddle/operators/type_alias.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { +using Tensor = framework::Tensor; +template +using EigenMatrix = framework::EigenMatrix; + template -class SoftmaxKernel : public OpKernel { +class SoftmaxKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext& context) const override { auto input = context.Input("X"); auto output = context.Output("Y"); output->mutable_data(context.GetPlace()); @@ -62,9 +64,9 @@ class SoftmaxKernel : public OpKernel { }; template -class SoftmaxGradKernel : public OpKernel { +class SoftmaxGradKernel : public framework::OpKernel { public: - void Compute(const ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext& context) const override { std::shared_ptr scale_ = std::make_shared(); auto Y = context.Input("Y"); diff --git a/paddle/operators/type_alias.h b/paddle/operators/type_alias.h deleted file mode 100644 index eac12d35dd8d2977191218167ebb0a6e638d5d73..0000000000000000000000000000000000000000 --- a/paddle/operators/type_alias.h +++ /dev/null @@ -1,55 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -#pragma once - -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/net_op.h" - -namespace paddle { -namespace operators { - -using OpKernel = framework::OpKernel; -using OperatorBase = framework::OperatorBase; -using InferShapeContext = framework::InferShapeContext; -using ExecutionContext = framework::ExecutionContext; -using Variable = framework::Variable; -template -using EigenScalar = framework::EigenScalar; -template -using EigenVector = framework::EigenVector; -template -using EigenMatrix = framework::EigenMatrix; -template -using EigenTensor = framework::EigenTensor; -using Tensor = framework::Tensor; -using Scope = framework::Scope; -using OperatorWithKernel = framework::OperatorWithKernel; -using OperatorBase = framework::OperatorBase; -using OpProtoAndCheckerMaker = framework::OpProtoAndCheckerMaker; -using OpProto = framework::OpProto; -using OpAttrChecker = framework::OpAttrChecker; -using CPUPlace = platform::CPUPlace; -using GPUPlace = platform::GPUPlace; -using OpRegistry = framework::OpRegistry; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..405b84b76d2e24db25d2ff16e99495f2f132ef09 --- /dev/null +++ b/paddle/operators/uniform_random_op.cc @@ -0,0 +1,84 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include +#include +#include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" + +namespace paddle { +namespace operators { + +// It seems that Eigen::Tensor::random in GPU will SEGFAULT. +// Use std::random and thrust::random(thrust is a std library in CUDA) to +// implement uniform random. +template +class CPUUniformRandomKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* tensor = context.Output(0); + T* data = tensor->mutable_data(context.GetPlace()); + unsigned int seed = + static_cast(context.op_.GetAttr("seed")); + std::minstd_rand engine; + if (seed == 0) { + seed = std::random_device()(); + } + engine.seed(seed); + std::uniform_real_distribution dist( + static_cast(context.op_.GetAttr("min")), + static_cast(context.op_.GetAttr("max"))); + for (ssize_t i = 0; i < framework::product(tensor->dims()); ++i) { + data[i] = dist(engine); + } + } +}; + +class UniformRandomOp : public framework::OperatorWithKernel { + protected: + void InferShape(const framework::InferShapeContext& ctx) const override { + PADDLE_ENFORCE(GetAttr("min") < GetAttr("max"), + "uniform_random's min must less then max"); + auto* tensor = ctx.Output(0); + auto dims = GetAttr>("dims"); + tensor->Resize(framework::make_ddim(dims)); + } +}; + +class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { + public: + UniformRandomOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddOutput("Out", "The output tensor of uniform random op"); + AddComment(R"DOC(Uniform random operator. + +Used to initialize tensor with uniform random generator. +)DOC"); + AddAttr>("dims", "the dimension of random tensor"); + AddAttr("min", "Minimum value of uniform random").SetDefault(-1.0f); + AddAttr("max", "Maximun value of uniform random").SetDefault(1.0f); + AddAttr("seed", + "Random seed of uniform random. " + "0 means generate a seed by system") + .SetDefault(0); + } +}; +} // namespace operators +} // namespace paddle + +REGISTER_OP(uniform_random, paddle::operators::UniformRandomOp, + paddle::operators::UniformRandomOpMaker); +REGISTER_OP_CPU_KERNEL(uniform_random, + paddle::operators::CPUUniformRandomKernel); diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..f1a63e52ec0d3d46a505a89d7d7916bf93a58221 --- /dev/null +++ b/paddle/operators/uniform_random_op.cu @@ -0,0 +1,70 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include +#include +#include +#include +#include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" + +namespace paddle { +namespace operators { + +template +struct UniformGenerator { + T min_, max_; + unsigned int seed_; + + __host__ __device__ UniformGenerator(T min, T max, int seed) + : min_(min), max_(max), seed_(seed) {} + + __host__ __device__ T operator()(const unsigned int n) const { + thrust::minstd_rand rng; + rng.seed(seed_); + thrust::uniform_real_distribution dist(min_, max_); + rng.discard(n); + return dist(rng); + } +}; + +// It seems that Eigen::Tensor::random in GPU will SEGFAULT. +// Use std::random and thrust::random(thrust is a std library in CUDA) to +// implement uniform random. +template +class GPUUniformRandomKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* tensor = context.Output(0); + T* data = tensor->mutable_data(context.GetPlace()); + unsigned int seed = + static_cast(context.op_.GetAttr("seed")); + if (seed == 0) { + seed = std::random_device()(); + } + T min = static_cast(context.op_.GetAttr("min")); + T max = static_cast(context.op_.GetAttr("max")); + thrust::counting_iterator index_sequence_begin(0); + ssize_t N = framework::product(tensor->dims()); + thrust::transform(index_sequence_begin, index_sequence_begin + N, + thrust::device_ptr(data), + UniformGenerator(min, max, seed)); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_GPU_KERNEL(uniform_random, + paddle::operators::GPUUniformRandomKernel); diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp index ef72b973c1a465a8ac03cae1070429160eac0ac1..0547ac93cd183afbcede41d280c6b4b16ed7dab1 100644 --- a/paddle/parameter/Argument.cpp +++ b/paddle/parameter/Argument.cpp @@ -666,4 +666,24 @@ void Argument::subArgFrom(const Argument& input, } } +void Argument::reorganizeSeqInfo( + const ICpuGpuVectorPtr seqStartPos, + const ICpuGpuVectorPtr subSeqStartPos, + std::vector>& reorganizedSeqInfo) { + int* seqStarts = seqStartPos->getMutableData(false); + int* subSeqStarts = subSeqStartPos->getMutableData(false); + + int seqNum = seqStartPos->getSize() - 1; + reorganizedSeqInfo.resize(seqNum, std::vector()); + int seqIdx = 0; + for (size_t i = 0; i < subSeqStartPos->getSize(); ++i) { + reorganizedSeqInfo[seqIdx].push_back(subSeqStarts[i]); + if (subSeqStarts[i] == seqStarts[seqIdx + 1]) { + seqIdx++; + if (seqIdx == seqNum) return; + reorganizedSeqInfo[seqIdx].push_back(subSeqStarts[i]); + } + } +} + } // namespace paddle diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h index 0ccdef802e71b659788cfd24f28ebe43e1917db1..d8d7a4398f99a2794c5d25528a7d582f5ed629ba 100644 --- a/paddle/parameter/Argument.h +++ b/paddle/parameter/Argument.h @@ -317,6 +317,30 @@ struct Argument { */ void printValueString(std::ostream& stream, const std::string& prefix = "") const; + + /** + * @brief reorganizeSeqInfo will reorganize sequenceStartPositions and + * subSequenceStartPositions into a 2 dimensional arrary: reorganizedSeqInfo. 
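In the GPU kernel above no single engine state is shared across threads: UniformGenerator re-seeds a fresh thrust::minstd_rand for every output index n and calls rng.discard(n) before drawing, so the value at position n depends only on (seed, n), not on thread scheduling. The same idea demonstrated on the host with std::minstd_rand (raw engine draws stand in for the distribution):

```
#include <cassert>
#include <random>

int main() {
  const unsigned int seed = 42;

  // Sequential reference stream: take the 6th draw (index n = 5).
  std::minstd_rand sequential(seed);
  std::minstd_rand::result_type fifth = 0;
  for (int i = 0; i < 6; ++i) fifth = sequential();

  // Counter-based style used by the GPU kernel: fresh engine, skip n draws,
  // then draw once. Element n reproduces the n-th variate of the stream.
  std::minstd_rand indexed(seed);
  indexed.discard(5);
  assert(indexed() == fifth);
  return 0;
}
```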
+ * + * @param seqStartPos: sequenceStartPositions of an Argument. + * @param subSeqStartPos: subSequenceStartPositions of an Argument. + * @param the reorganized sequence start position information. + * + * Examples: + * seqStartPos: [0, 4, 15, 20, 28] + * subSeqStartPos: [0, 3, 4, 5, 7, 10, 15, 20, 22, 23, 25, 28] + * reorganizedSeqInfo: + * [ + * [0,3,4], + * [4,5,7,10,15], + * [15,20], + * [20,22,23,25,28] + * ] + */ + static void reorganizeSeqInfo( + const ICpuGpuVectorPtr seqStartPos, + const ICpuGpuVectorPtr subSeqStartPos, + std::vector>& reorganizedSeqInfo); }; } // namespace paddle diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index bd77bb7daa50e0b273f110624ddf6f4b79a3ceab..4154aad15c39119e2f155cb2c7b5177b5aa78022 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -8,7 +8,7 @@ cc_test(place_test SRCS place_test.cc DEPS place glog gflags) add_subdirectory(dynload) -cc_test(enforce_test SRCS enforce_test.cc) +cc_test(enforce_test SRCS enforce_test.cc DEPS stringpiece) IF(WITH_GPU) set(GPU_CTX_DEPS dynload_cuda dynamic_loader) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index bc0715656a7d61774d53d4a0643ec1c105706085..d2adb997de8e36922d5056b20f238a82eee74f8c 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -187,25 +187,16 @@ inline void throw_on_error(T e) { __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__) #define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \ __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__) - -// if two values have different data types, choose a compatible type for them. -template -struct CompatibleType { - static const bool t1_to_t2 = std::is_convertible::value; - typedef typename std::conditional::type type; -}; +#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...) \ + PADDLE_ENFORCE(nullptr != (__VAL), #__VAL " should not be null\n%s", \ + paddle::string::Sprintf("" __VA_ARGS__)); #define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...) \ - PADDLE_ENFORCE(__COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL0) \ - __CMP __COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL1), \ + PADDLE_ENFORCE(__VAL0 __CMP __VAL1, \ "enforce %s " #__CMP " %s failed, %s " #__INV_CMP " %s\n%s", \ #__VAL0, #__VAL1, std::to_string(__VAL0), \ std::to_string(__VAL1), \ paddle::string::Sprintf("" __VA_ARGS__)); -#define __COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL) \ - typename paddle::platform::CompatibleType::type(__VAL) - } // namespace platform } // namespace paddle diff --git a/paddle/platform/enforce_test.cc b/paddle/platform/enforce_test.cc index 7117b49474044af08ae9db79c2fae6693e966af2..5408fce558ab0d9c369aaba22374315fe553ce0f 100644 --- a/paddle/platform/enforce_test.cc +++ b/paddle/platform/enforce_test.cc @@ -9,8 +9,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
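With the `CompatibleType` detour removed, `__PADDLE_BINARY_COMPARE` now expands to a plain `PADDLE_ENFORCE(v0 CMP v1, ...)` with a formatted message, and `PADDLE_ENFORCE_NOT_NULL` is shorthand for a null check with a "<name> should not be null" message. A Python analogue of that message layout — not the real macros, and the expression text that the C++ `#__VAL0` stringification captures has to be passed in by hand here — which also shows the prefix the rewritten tests below match against:

```python
def enforce_eq(expr0, v0, expr1, v1, extra=""):
    """Analogue of PADDLE_ENFORCE_EQ's failure message."""
    if not (v0 == v1):
        msg = "enforce %s == %s failed, %s != %s" % (expr0, expr1, v0, v1)
        if extra:
            msg += "\n" + extra
        raise ValueError(msg)

def enforce_not_null(val, name):
    """Analogue of PADDLE_ENFORCE_NOT_NULL."""
    if val is None:
        raise ValueError("%s should not be null" % name)

a = 2
try:
    enforce_eq("a", a, "1 + 3", 1 + 3, "their size not match")
except ValueError as e:
    # The unit tests only check the message prefix, not the full string.
    assert str(e).startswith("enforce a == 1 + 3 failed, 2 != 4")
```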
*/ -#include "paddle/platform/enforce.h" +#include + #include "gtest/gtest.h" +#include "paddle/platform/enforce.h" +#include "paddle/string/piece.h" + +using StringPiece = paddle::string::Piece; +using paddle::string::HasPrefix; TEST(ENFORCE, OK) { PADDLE_ENFORCE(true, "Enforce is ok %d now %f", 123, 0.345); @@ -20,19 +26,15 @@ TEST(ENFORCE, OK) { } TEST(ENFORCE, FAILED) { - bool in_catch = false; + bool caught_exception = false; try { PADDLE_ENFORCE(false, "Enforce is not ok %d at all", 123); } catch (paddle::platform::EnforceNotMet error) { - // your error handling code here - in_catch = true; - std::string msg = "Enforce is not ok 123 at all"; - const char* what = error.what(); - for (size_t i = 0; i < msg.length(); ++i) { - ASSERT_EQ(what[i], msg[i]); - } + caught_exception = true; + EXPECT_TRUE( + HasPrefix(StringPiece(error.what()), "Enforce is not ok 123 at all")); } - ASSERT_TRUE(in_catch); + EXPECT_TRUE(caught_exception); } TEST(ENFORCE, NO_ARG_OK) { @@ -45,41 +47,27 @@ TEST(ENFORCE, NO_ARG_OK) { TEST(ENFORCE_EQ, NO_EXTRA_MSG_FAIL) { int a = 2; - bool in_catch = false; - + bool caught_exception = false; try { PADDLE_ENFORCE_EQ(a, 1 + 3); - } catch (paddle::platform::EnforceNotMet error) { - in_catch = true; - const std::string msg = "enforce a == 1 + 3 failed, 2 != 4"; - const char* what = error.what(); - for (size_t i = 0; i < msg.length(); ++i) { - ASSERT_EQ(what[i], msg[i]); - } + caught_exception = true; + HasPrefix(StringPiece(error.what()), "enforce a == 1 + 3 failed, 2 != 4"); } - - ASSERT_TRUE(in_catch); + EXPECT_TRUE(caught_exception); } TEST(ENFORCE_EQ, EXTRA_MSG_FAIL) { int a = 2; - bool in_catch = false; - + bool caught_exception = false; try { PADDLE_ENFORCE_EQ(a, 1 + 3, "%s size not match", "their"); - } catch (paddle::platform::EnforceNotMet error) { - in_catch = true; - const std::string msg = - "enforce a == 1 + 3 failed, 2 != 4\ntheir size not match"; - const char* what = error.what(); - for (size_t i = 0; i < msg.length(); ++i) { - ASSERT_EQ(what[i], msg[i]); - } + caught_exception = true; + HasPrefix(StringPiece(error.what()), + "enforce a == 1 + 3 failed, 2 != 4\ntheir size not match"); } - - ASSERT_TRUE(in_catch); + EXPECT_TRUE(caught_exception); } TEST(ENFORCE_NE, OK) { @@ -87,42 +75,32 @@ TEST(ENFORCE_NE, OK) { PADDLE_ENFORCE_NE(1.0, 2UL); } TEST(ENFORCE_NE, FAIL) { - bool in_catch = false; + bool caught_exception = false; try { // 2UL here to check data type compatible PADDLE_ENFORCE_NE(1.0, 1UL); - } catch (paddle::platform::EnforceNotMet error) { - in_catch = true; - const std::string msg = "enforce 1.0 != 1UL failed, 1.000000 == 1"; - const char* what = error.what(); - for (size_t i = 0; i < msg.length(); ++i) { - ASSERT_EQ(what[i], msg[i]); - } + caught_exception = true; + EXPECT_TRUE(HasPrefix(StringPiece(error.what()), + "enforce 1.0 != 1UL failed, 1.000000 == 1")) + << error.what() << " does not have expected prefix"; } - - ASSERT_TRUE(in_catch); + EXPECT_TRUE(caught_exception); } TEST(ENFORCE_GT, OK) { PADDLE_ENFORCE_GT(2, 1); } TEST(ENFORCE_GT, FAIL) { - bool in_catch = false; - + bool caught_exception = false; try { - // 2UL here to check data type compatible PADDLE_ENFORCE_GT(1, 2UL); } catch (paddle::platform::EnforceNotMet error) { - in_catch = true; - const std::string msg = "enforce 1 > 2UL failed, 1 <= 2"; - const char* what = error.what(); - for (size_t i = 0; i < msg.length(); ++i) { - ASSERT_EQ(what[i], msg[i]); - } + caught_exception = true; + EXPECT_TRUE( + HasPrefix(StringPiece(error.what()), "enforce 1 > 2UL failed, 1 <= 2")); } - - 
ASSERT_TRUE(in_catch); + EXPECT_TRUE(caught_exception); } TEST(ENFORCE_GE, OK) { @@ -132,21 +110,16 @@ TEST(ENFORCE_GE, OK) { PADDLE_ENFORCE_GE(3.21, 2UL); } TEST(ENFORCE_GE, FAIL) { - bool in_catch = false; - + bool caught_exception = false; try { PADDLE_ENFORCE_GE(1, 2UL); } catch (paddle::platform::EnforceNotMet error) { - in_catch = true; - const std::string msg = "enforce 1 >= 2UL failed, 1 < 2"; - const char* what = error.what(); - for (size_t i = 0; i < msg.length(); ++i) { - ASSERT_EQ(what[i], msg[i]); - } + caught_exception = true; + EXPECT_TRUE( + HasPrefix(StringPiece(error.what()), "enforce 1 >= 2UL failed, 1 < 2")); } - - ASSERT_TRUE(in_catch); + EXPECT_TRUE(caught_exception); } TEST(ENFORCE_LE, OK) { @@ -157,21 +130,16 @@ TEST(ENFORCE_LE, OK) { PADDLE_ENFORCE_LE(2UL, 3.2); } TEST(ENFORCE_LE, FAIL) { - bool in_catch = false; - + bool caught_exception = false; try { PADDLE_ENFORCE_GT(1, 2UL); } catch (paddle::platform::EnforceNotMet error) { - in_catch = true; - const std::string msg = "enforce 1 > 2UL failed, 1 <= 2"; - const char* what = error.what(); - for (size_t i = 0; i < msg.length(); ++i) { - ASSERT_EQ(what[i], msg[i]); - } + caught_exception = true; + EXPECT_TRUE( + HasPrefix(StringPiece(error.what()), "enforce 1 > 2UL failed, 1 <= 2")); } - - ASSERT_TRUE(in_catch); + EXPECT_TRUE(caught_exception); } TEST(ENFORCE_LT, OK) { @@ -180,19 +148,31 @@ TEST(ENFORCE_LT, OK) { PADDLE_ENFORCE_LT(2UL, 3); } TEST(ENFORCE_LT, FAIL) { - bool in_catch = false; - + bool caught_exception = false; try { PADDLE_ENFORCE_LT(1UL, 0.12); - } catch (paddle::platform::EnforceNotMet error) { - in_catch = true; - const std::string msg = "enforce 1UL < 0.12 failed, 1 >= 0.12"; - const char* what = error.what(); - for (size_t i = 0; i < msg.length(); ++i) { - ASSERT_EQ(what[i], msg[i]); - } + caught_exception = true; + EXPECT_TRUE(HasPrefix(StringPiece(error.what()), + "enforce 1UL < 0.12 failed, 1 >= 0.12")); } + EXPECT_TRUE(caught_exception); +} + +TEST(ENFORCE_NOT_NULL, OK) { + int* a = new int; + PADDLE_ENFORCE_NOT_NULL(a); + delete a; +} +TEST(ENFORCE_NOT_NULL, FAIL) { + bool caught_exception = false; + try { + int* a = nullptr; + PADDLE_ENFORCE_NOT_NULL(a); - ASSERT_TRUE(in_catch); + } catch (paddle::platform::EnforceNotMet error) { + caught_exception = true; + EXPECT_TRUE(HasPrefix(StringPiece(error.what()), "a should not be null")); + } + EXPECT_TRUE(caught_exception); } diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt deleted file mode 100644 index 29dd0ded0ac75893da7e244d92725cd5e285efce..0000000000000000000000000000000000000000 --- a/paddle/pybind/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -cc_library(paddle_pybind SHARED - SRCS pybind.cc - DEPS pybind python backward - fc_op - sgd_op - add_op - mean_op - cross_entropy_op - recurrent_op) diff --git a/paddle/scripts/CMakeLists.txt b/paddle/scripts/CMakeLists.txt index 66a46e1883a49d491f0cb3056a7039407d72e337..a52f06fe497dac467e4ef2543ebda7a423ca326d 100644 --- a/paddle/scripts/CMakeLists.txt +++ b/paddle/scripts/CMakeLists.txt @@ -1,17 +1,15 @@ configure_file(submit_local.sh.in - submit_local.sh + paddle @ONLY) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/submit_local.sh DESTINATION bin +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/paddle DESTINATION bin PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ - GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ - RENAME paddle) + GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ) configure_file(tools/usage_stat/usage.sh - usage.sh + paddle_usage @ONLY) -install(FILES 
${CMAKE_CURRENT_BINARY_DIR}/usage.sh DESTINATION opt/paddle/bin +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/paddle_usage DESTINATION opt/paddle/bin PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ - GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ - RENAME paddle_usage) + GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index ede9e210245df740f13ebb32c98313554f522dd9..44442be4729ff77e8d378c93acebe1486eb75397 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -33,6 +33,9 @@ Configuring cmake in /paddle/build ... -DWITH_AVX=${WITH_AVX:-OFF} -DWITH_GOLANG=${WITH_GOLANG:-OFF} -DWITH_SWIG_PY=ON + -DWITH_C_API=${WITH_C_API:-OFF} + -DWITH_PYTHON=${WITH_PYTHON:-ON} + -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} -DCUDNN_ROOT=/usr/ -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF} -DWITH_TESTING=${WITH_TESTING:-OFF} @@ -49,7 +52,9 @@ cmake .. \ -DWITH_GPU=${WITH_GPU:-OFF} \ -DWITH_AVX=${WITH_AVX:-OFF} \ -DWITH_GOLANG=${WITH_GOLANG:-OFF} \ - -DWITH_SWIG_PY=ON \ + -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \ + -DWITH_C_API=${WITH_C_API:-OFF} \ + -DWITH_PYTHON=${WITH_PYTHON:-ON} \ -DCUDNN_ROOT=/usr/ \ -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF} \ -DWITH_TESTING=${WITH_TESTING:-OFF} \ diff --git a/paddle/scripts/docker/build_android.sh b/paddle/scripts/docker/build_android.sh index 56d290be4ab04a9f6974023159aa8571d27f8dd5..5584e29e2a155a8062f7d4f2016bd389bd9803f3 100644 --- a/paddle/scripts/docker/build_android.sh +++ b/paddle/scripts/docker/build_android.sh @@ -20,4 +20,4 @@ cmake -DCMAKE_SYSTEM_NAME=Android \ -DWITH_SWIG_PY=OFF \ .. make -j `nproc` -make install +make install -j `nproc` diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in old mode 100644 new mode 100755 diff --git a/paddle/scripts/travis/build_doc.sh b/paddle/scripts/travis/build_doc.sh index 33fb5d84e2701c163b5d1b1bb3362ee81ebb34ea..dfcff38302703066e868c60e213f0f7cbc55a31e 100755 --- a/paddle/scripts/travis/build_doc.sh +++ b/paddle/scripts/travis/build_doc.sh @@ -5,15 +5,9 @@ set -e mkdir -p $TRAVIS_BUILD_DIR/build cd $TRAVIS_BUILD_DIR/build -# Compile paddle binaries first -cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_GOLANG=ON -DWITH_STYLE_CHECK=OFF - -mkdir output -make -j `nproc` -find .. -name '*whl' | xargs pip install # install all wheels. -rm -rf * # Compile Documentation only. cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON +make -j `nproc` gen_proto_py make -j `nproc` paddle_docs paddle_docs_cn # check websites for broken links @@ -35,6 +29,7 @@ TARGET_BRANCH="gh-pages" SOURCE_BRANCH="master" # Clone the repo to output directory +mkdir output git clone $REPO output cd output diff --git a/paddle/setup.py.in b/paddle/setup.py.in deleted file mode 100644 index af107e76723135124e56db52a76e4f8aff5c4acf..0000000000000000000000000000000000000000 --- a/paddle/setup.py.in +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -from setuptools import setup, Extension - -setup(name="py_paddle", - version="${PADDLE_VERSION}", - packages=['py_paddle'], - include_package_data=True, - package_data={'py_paddle':['*.py','_swig_paddle.so']}, - install_requires = [ - 'nltk>=3.2.2', - # We use `numpy.flip` in `test_image.py`. - # `numpy.flip` is introduced in `1.12.0` - 'numpy>=1.12.0', # The numpy is required. - 'protobuf==${PROTOBUF_VERSION}' # The paddle protobuf version - ], - url='http://www.paddlepaddle.org/', - license='Apache 2.0', -) diff --git a/paddle/trainer/NewRemoteParameterUpdater.cpp b/paddle/trainer/NewRemoteParameterUpdater.cpp index e1558e3fdfbcf296be0ee64202132f53bf901be9..af1dceed0284c70d68b61b9682b0cb23c28043d6 100644 --- a/paddle/trainer/NewRemoteParameterUpdater.cpp +++ b/paddle/trainer/NewRemoteParameterUpdater.cpp @@ -50,8 +50,8 @@ void NewRemoteParameterUpdater::init( // create parameter server client. if (useEtcd_) { - parameterClient_ = paddle_new_etcd_pserver_client( - (char *)pserverSpec_.c_str(), FLAGS_trainer_id == 0); + parameterClient_ = + paddle_new_etcd_pserver_client((char *)pserverSpec_.c_str()); } else { parameterClient_ = paddle_new_pserver_client((char *)pserverSpec_.c_str(), FLAGS_trainer_id == 0); diff --git a/paddle/trainer/tests/simple_sparse_neural_network.py b/paddle/trainer/tests/simple_sparse_neural_network.py index 9604e1b9b45e571130c2f1bdc6d6a5fbd9c177c4..30346ef299d0bc8585ccff7f2fc4885b0d9f9dfc 100644 --- a/paddle/trainer/tests/simple_sparse_neural_network.py +++ b/paddle/trainer/tests/simple_sparse_neural_network.py @@ -1,6 +1,6 @@ from paddle.trainer_config_helpers import * -settings(batch_size=128, learning_method=AdaGradOptimizer(), learning_rate=1e-4) +settings(batch_size=17, learning_method=AdaGradOptimizer(), learning_rate=1e-4) file_list = 'trainer/tests/fake_file_list.list' @@ -12,7 +12,7 @@ define_py_data_sources2( embedding = embedding_layer( input=data_layer( - name="word_ids", size=65536), + name="word_ids", size=8191), size=128, param_attr=ParamAttr(sparse_update=True)) prediction = fc_layer(input=embedding, size=10, act=SoftmaxActivation()) diff --git a/paddle/trainer/tests/simple_sparse_neural_network_dp.py b/paddle/trainer/tests/simple_sparse_neural_network_dp.py index 8bfd1f37e7114f2dcd0798ff1e8180b111ad988f..86b272edfe1bbb23c45cffe282f6475ceaa0cc41 100644 --- a/paddle/trainer/tests/simple_sparse_neural_network_dp.py +++ b/paddle/trainer/tests/simple_sparse_neural_network_dp.py @@ -7,15 +7,15 @@ def init_hook(settings, is_train, **kwargs): @provider( - input_types={'word_ids': integer_value(65536), + input_types={'word_ids': integer_value(8191), 'label': integer_value(10)}, min_pool_size=0, init_hook=init_hook) def process(settings, filename): if settings.is_train: - data_size = 2**20 - else: data_size = 2**10 + else: + data_size = 2**5 for _ in xrange(data_size): - yield random.randint(0, 65535), random.randint(0, 9) + yield random.randint(0, 8190), random.randint(0, 9) diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp index 4d0174f784a0dc7314977d586c3ad1f0f9c69f6d..00ba61377aeff17d82e03f7560c0d71b3570d14f 100644 --- a/paddle/trainer/tests/test_TrainerOnePass.cpp +++ b/paddle/trainer/tests/test_TrainerOnePass.cpp @@ -100,25 +100,25 @@ TEST(average_window, gpu) { } TEST(average_window, gpu2) { - FLAGS_num_passes = 100; + FLAGS_num_passes = 20; trainerOnePassTest(configFile1, true, false, 2, 
0.01); FLAGS_num_passes = 1; } TEST(average_window, gpu4) { - FLAGS_num_passes = 100; + FLAGS_num_passes = 20; trainerOnePassTest(configFile1, true, false, 4, 0.01); FLAGS_num_passes = 1; } TEST(average_window_cpu, gpu2) { - FLAGS_num_passes = 100; + FLAGS_num_passes = 20; trainerOnePassTest(configFile1, true, false, 2, 0.01, true); FLAGS_num_passes = 1; } TEST(average_window_cpu, gpu4) { - FLAGS_num_passes = 100; + FLAGS_num_passes = 20; trainerOnePassTest(configFile1, true, false, 4, 0.01, true); FLAGS_num_passes = 1; } diff --git a/proto/CMakeLists.txt b/proto/CMakeLists.txt index 18584cafe7971bad281b498908c54780250791b7..e1cea8bd0de5394020a498725485cea025512e48 100644 --- a/proto/CMakeLists.txt +++ b/proto/CMakeLists.txt @@ -17,7 +17,7 @@ foreach(filename ${proto_filenames}) COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} ARGS "--python_out=${PROJ_ROOT}/python/paddle/proto" "-I" ${CMAKE_CURRENT_SOURCE_DIR} ${ABS_FIL} - DEPENDS ${ABS_FIL} ${external_project_dependencies}) + DEPENDS ${ABS_FIL} protoc) endforeach() add_custom_target(gen_proto_py ALL DEPENDS ${PROTO_GEN_PY}) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 0171f9d8ccd6045cb876d57684269a2a49e77f96..b5030da8e75eb94e857ae4effc6adb6d19dc0e93 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -39,7 +39,7 @@ add_custom_command(OUTPUT ${OUTPUT_DIR}/.timestamp DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER}) add_custom_target(paddle_python ALL DEPENDS - ${OUTPUT_DIR}/.timestamp) + ${OUTPUT_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model python_api_wheel) set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 9ea69fc5e57636c22fb20d5d97de760b9cc3bcde..b7b696ef0c13e1bae2e910e08d1a1ea3e45cd5d5 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -2657,6 +2657,31 @@ class SubSequenceLayer(LayerBase): self.create_bias_parameter(bias, size) +@config_layer('sub_nested_seq') +class SubNestedSequenceLayer(LayerBase): + def __init__(self, name, inputs, selected_indices, bias=False, **xargs): + if isinstance(inputs, list): + assert len(inputs) == 1, ('the first input of sub_nested_seq ' + 'layer is a single nested sequence.') + inputs = inputs[0] + if isinstance(selected_indices, list): + assert len(selected_indices) == 1, ( + 'the second input of ' + 'sub_nested_seq layer is a single layer which is a ' + 'set of selected indices.') + selected_indices = selected_indices[0] + + super(SubNestedSequenceLayer, self).__init__( + name, + 'sub_nested_seq', + 0, + inputs=[inputs, selected_indices], + **xargs) + input_layer0 = self.get_input_layer(0) + size = input_layer0.size + self.set_layer_size(size) + + @config_layer('out_prod') class OuterProdLayer(LayerBase): def __init__(self, name, inputs, device=None): @@ -3223,6 +3248,16 @@ class CTCLayer(LayerBase): config_assert(len(self.inputs) == 2, 'CTCLayer must have 2 inputs') +@config_layer('kmax_seq_score') +class KmaxSeqScoreLayer(LayerBase): + def __init__(self, name, inputs, beam_size, **xargs): + super(KmaxSeqScoreLayer, self).__init__( + name, 'kmax_seq_score', 0, inputs=inputs, **xargs) + config_assert( + len(self.inputs) == 1, 'KmaxSeqScoreLayer has only one input.') + self.config.beam_size = beam_size + + @config_layer('warp_ctc') class WarpCTCLayer(LayerBase): def __init__(self, diff --git 
a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index ea5fdcc50f6abbc67fb61b7fd56c100d9f9811d0..1bc55c869601551aff5fc0311458f906385522d2 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -129,8 +129,10 @@ __all__ = [ 'prelu_layer', 'gated_unit_layer', 'crop_layer', + 'sub_nested_seq_layer', 'clip_layer', 'slice_projection', + 'kmax_sequence_score_layer', ] @@ -224,8 +226,11 @@ class LayerType(object): PRELU = 'prelu' CROP_LAYER = 'crop' + SUB_NESTED_SEQ = 'sub_nested_seq' CLIP_LAYER = 'clip' + KMAX_SEQ_SCORE = 'kmax_seq_score' + @staticmethod def is_layer_type(type_name): """ @@ -6088,6 +6093,53 @@ def crop_layer(input, offset, axis=2, shape=None, name=None, layer_attr=None): size=l.config.size) +@wrap_name_default() +@layer_support() +def sub_nested_seq_layer(input, selected_indices, name=None): + """ + The sub_nested_seq_layer accepts two inputs: the first one is a nested + sequence; the second one is a set of selceted indices in the nested sequence. + + Then sub_nest_seq_layer trims the first nested sequence input according + to the selected indices to form a new output. This layer is useful in + beam training. + + The example usage is: + + .. code-block:: python + + sub_nest_seq = sub_nested_seq_layer(input=[data, selected_indices]) + + + :param input: A nested sequence. + :type input: LayerOutput + :param selected_indices: a set of sequence indices in the nested sequence. + :type input: LayerOutput + :param name: name of this layer. + :type name: basestring + :return: LayerOutput object. + :rtype: LayerOutput + """ + + assert isinstance(input, LayerOutput), ( + 'The first input of ' + 'sub_nested_seq_layer must be a Paddle layer.') + assert isinstance(selected_indices, LayerOutput), ( + 'The second input of ' + 'sub_nested_seq_layer must be a Paddle layer.') + + l = Layer( + inputs=input.name, + selected_indices=selected_indices.name, + name=name, + type=LayerType.SUB_NESTED_SEQ) + return LayerOutput( + name=name, + layer_type=LayerType.SUB_NESTED_SEQ, + parents=input, + size=l.config.size) + + @wrap_name_default("clip") def clip_layer(input, min, max, name=None): """ @@ -6109,7 +6161,8 @@ def clip_layer(input, min, max, name=None): :type min: double :param max: The upper threshold for clipping. :type max: double - :return: LayerOutput + :return: LayerOutput object. + :rtype: LayerOutput """ Layer( name=name, @@ -6119,3 +6172,41 @@ def clip_layer(input, min, max, name=None): max=max) return LayerOutput( name, LayerType.CLIP_LAYER, parents=[input], size=input.size) + + +@wrap_name_default() +@layer_support() +def kmax_sequence_score_layer(input, name=None, beam_size=1): + """ + This layer accepts one input which are scores over a sequence or a nested + sequence, and returns indices of beam_size sequences with highest scores. + + .. code-block:: python + + kmax_indices = kmax_sequence_score_layer(input=input_layer, beam_size) + + + :param name: The Layer Name. + :type name: basestring + :param input: The input layer. It stores scores over a sequence or a nested + sequence and its size must be 1. + :type input: LayerOutput. + :param beam_size: squence indices with top beam_size scores are returned. + :type beam_size: double + :return: LayerOutput object. 
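Note that `sub_nested_seq_layer` takes the nested sequence and the index layer as two separate keyword arguments (`input` and `selected_indices`); the snippet below follows the test configuration added later in this patch, with illustrative names and sizes:

```python
from paddle.trainer_config_helpers import *

beam_size = 5

# A nested sequence to prune, and a layer carrying the indices of the
# sub-sequences to keep.
data = data_layer(name='input_seq', size=300)
selected_ids = data_layer(name='input', size=beam_size)

sub_nest_seq = sub_nested_seq_layer(input=data, selected_indices=selected_ids)

outputs(sub_nest_seq)
```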
+ :rtype: LayerOutput + """ + assert isinstance(input, LayerOutput), ("kmax_sequence_score_layer " + "accepts only one input.") + assert input.size == 1, ( + "input of kmax_sequence_score_layer is a score" + "over a sequence or a nested sequence, so its width must be 1.") + + Layer( + name=name, + type=LayerType.KMAX_SEQ_SCORE, + inputs=[input.name], + beam_size=beam_size) + + return LayerOutput( + name, LayerType.KMAX_SEQ_SCORE, parents=[input], size=input.size) diff --git a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh index 0ffa58bc1e2088f75e7cd25c7ecdffbe270825a4..a61beb871ad064c617fa141451afcb2a5ac64854 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh @@ -7,6 +7,7 @@ test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_layer -test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer) +test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer +test_kmax_seq_socre_layer test_seq_select_layers) export whole_configs=(test_split_datasource) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr new file mode 100644 index 0000000000000000000000000000000000000000..81bd71f68eb3f2c04ccd46ee3b77a07543395c60 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr @@ -0,0 +1,66 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 300 + active_type: "" +} +layers { + name: "data" + type: "data" + size: 128 + active_type: "" +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 1 + active_type: "exponential" + inputs { + input_layer_name: "data" + input_parameter_name: "___fc_layer_0__.w0" + } + bias_parameter_name: "___fc_layer_0__.wbias" +} +layers { + name: "__kmax_sequence_score_layer_0__" + type: "kmax_seq_score" + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + } + beam_size: 5 +} +parameters { + name: "___fc_layer_0__.w0" + size: 128 + initial_mean: 0.0 + initial_std: 0.0883883476483 + dims: 128 + dims: 1 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___fc_layer_0__.wbias" + size: 1 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +output_layer_names: "__kmax_sequence_score_layer_0__" +sub_models { + name: "root" + layer_names: "input" + layer_names: "data" + layer_names: "__fc_layer_0__" + layer_names: "__kmax_sequence_score_layer_0__" + input_layer_names: "data" + output_layer_names: "__kmax_sequence_score_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_select_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_select_layers.protostr new file mode 100644 index 0000000000000000000000000000000000000000..4b906b113e3c0569d5576127e100d097e4923436 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_select_layers.protostr @@ 
-0,0 +1,37 @@ +type: "nn" +layers { + name: "input_seq" + type: "data" + size: 300 + active_type: "" +} +layers { + name: "input" + type: "data" + size: 5 + active_type: "" +} +layers { + name: "__sub_nested_seq_layer_0__" + type: "sub_nested_seq" + size: 300 + active_type: "" + inputs { + input_layer_name: "input_seq" + } + inputs { + input_layer_name: "input" + } +} +input_layer_names: "input_seq" +output_layer_names: "__sub_nested_seq_layer_0__" +sub_models { + name: "root" + layer_names: "input_seq" + layer_names: "input" + layer_names: "__sub_nested_seq_layer_0__" + input_layer_names: "input_seq" + output_layer_names: "__sub_nested_seq_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..d245c5a41c793e1f02f306bfe64071bd9885906e --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +#coding=utf-8 +from paddle.trainer_config_helpers import * + +data = data_layer(name='input', size=300) + +data = data_layer(name="data", size=128) +scores = fc_layer(input=data, size=1, act=ExpActivation()) +kmax_seq_id = kmax_sequence_score_layer(input=scores, beam_size=5) + +outputs(kmax_seq_id) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_seq_select_layers.py b/python/paddle/trainer_config_helpers/tests/configs/test_seq_select_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..6d1c3175ba9801d69f3f9cb9e754858253192270 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_seq_select_layers.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +#coding=utf-8 +from paddle.trainer_config_helpers import * + +beam_size = 5 + +data = data_layer(name='input_seq', size=300) +selected_ids = data_layer(name='input', size=beam_size) +sub_nest_seq = sub_nested_seq_layer(input=data, selected_indices=selected_ids) + +outputs(sub_nest_seq) diff --git a/python/paddle/v2/framework/network.py b/python/paddle/v2/framework/network.py deleted file mode 100644 index cfeb0e3dec0fd2c6ad4d2d2501f97932495fdd41..0000000000000000000000000000000000000000 --- a/python/paddle/v2/framework/network.py +++ /dev/null @@ -1,131 +0,0 @@ -import paddle.v2.framework.core as core -from paddle.v2.framework.create_op_creation_methods import op_creations -from default_scope_funcs import new_var, find_var, get_cur_scope - -__all__ = ['Network'] # Only expose Network - - -class NetworkFunctor(object): - """ - Network Op Creation Function. Used internally in this module. - It convert string input to Variable. If it is not created before, just - create in scope. - - It is a functor object. means the instances are callable. - - :param func: The op creation function which generated in Python. - :param net: The Network instance. 
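The two new layers are meant to work together in beam-style training: score each sub-sequence, keep the `beam_size` best, then trim the nested input down to those. A configuration sketch that chains them, with hypothetical layer names and the sizes used in the test configs above:

```python
from paddle.trainer_config_helpers import *

beam_size = 5

# Nested candidate sequences.
candidates = data_layer(name='candidate_seq', size=300)

# One score per sub-sequence: kmax_sequence_score_layer requires its
# input width to be 1.
scores = fc_layer(input=candidates, size=1, act=ExpActivation())

# Indices of the beam_size highest-scoring sub-sequences.
top_ids = kmax_sequence_score_layer(input=scores, beam_size=beam_size)

# Keep only the selected sub-sequences of the nested input.
pruned = sub_nested_seq_layer(input=candidates, selected_indices=top_ids)

outputs(pruned)
```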
- """ - - def __init__(self, func, net): - self.func = func - self.net = net - - def __call__(self, *args, **kwargs): - if len(args) != 0: - raise ValueError("Paddle must use keyword argument") - inputs = self.func.all_input_args - for ipt in inputs: - if ipt in kwargs: - var = kwargs[ipt] - if isinstance(var, basestring): - tmp = new_var(var) - self.net.var_names[tmp] = var - var = tmp - - if not isinstance(var, core.Variable): - raise TypeError( - "Input of op creation must be string or variable") - - kwargs[ipt] = self.net.var_names[var] - - notemp_outputs = self.func.all_not_temp_output_args - - for name in notemp_outputs: - if name not in kwargs: - kwargs[ - name] = self.func.__name__ + "@OUT@%d" % core.unique_integer( - ) - - outputs = self.func.all_output_args - for opt in outputs: - if opt in kwargs: - var = kwargs[opt] - if isinstance(var, basestring): - tmp = new_var(var) - self.net.var_names[tmp] = var - var = tmp - - if not isinstance(var, core.Variable): - raise TypeError( - "Output of op creation must be string or variable") - kwargs[opt] = self.net.var_names[var] - - op = self.func(**kwargs) - - self.net.net.add_op(op) - - lst = [find_var(kwargs[opt]) for opt in notemp_outputs] - if len(lst) == 1: - return lst[0] - elif len(lst) == 0: - return None - else: - return lst - - -class Network(object): - """ - The network concept. It avoid user to manually create operator, create - variable, and combine them into a Net. Just use Network.xxx can create the - operator, create variables in default scope, and add them into `self.net`. - - For example: - - .. code-block: python - - net = Network() - out = net.add_two(X="a", Y="b") - fc_out = net.fc(X="out", W="fc.w") - - net.run(...) - """ - - def __init__(self): - self.net = core.Net.create() - funcs = (func_name for func_name in dir(op_creations) - if not func_name.startswith("__")) - self.var_names = dict() - - # TODO(yuyang18): This code can work, but do not generate a good - # docstring, try to give a better way generate function in runtime - # later. 
- for func_name in funcs: - func = getattr(op_creations, func_name) - impl = NetworkFunctor(func, self) - setattr(self, func_name, impl.__call__) - self.__complete_add_op__ = False - - def infer_shape(self): - self.complete_add_op() - self.net.infer_shape(get_cur_scope()) - - def run(self, device_context): - self.complete_add_op() - self.net.run(get_cur_scope(), device_context) - - def __str__(self): - return str(self.net) - - def complete_add_op(self): - if not self.__complete_add_op__: - self.net.complete_add_op() - self.__complete_add_op__ = True - - -if __name__ == '__main__': - net = Network() - out = net.add_two(X="a", Y="b") - fc_out = net.fc(X=out, W="fc.w", b="fc.b", activation="softmax") - net.complete_add_op() - print net diff --git a/python/paddle/v2/framework/create_op_creation_methods.py b/python/paddle/v2/framework/op.py similarity index 66% rename from python/paddle/v2/framework/create_op_creation_methods.py rename to python/paddle/v2/framework/op.py index 6fd33b366b6d376cc51ba5d663bb04d45ab8c933..7fd8b55a5d167294d3270c79f7b64da03443afd3 100644 --- a/python/paddle/v2/framework/create_op_creation_methods.py +++ b/python/paddle/v2/framework/op.py @@ -2,7 +2,6 @@ import paddle.v2.framework.core as core import paddle.v2.framework.proto.op_proto_pb2 as op_proto_pb2 import paddle.v2.framework.proto.op_desc_pb2 as op_desc_pb2 import paddle.v2.framework.proto.attribute_pb2 as attribute_pb2 -import cStringIO def get_all_op_protos(): @@ -146,64 +145,14 @@ class OpDescCreationMethod(object): return False -def get_docstring_from_op_proto(op_proto): - """ - Generate docstring from a OpProto - :param op_proto: a OpProto instance. - :type op_proto: op_proto_pb2.OpProto - :return: docstring - """ - if not isinstance(op_proto, op_proto_pb2.OpProto): - raise TypeError("Input must be OpProto") - f = cStringIO.StringIO() - f.write(op_proto.comment) - f.write("\n") - - def __append_param__(name, comment, type): - # Maybe replace the following line with template engine is better. - f.write(":param ") - f.write(name) - f.write(": ") - f.write(comment) - f.write("\n") - f.write(":type ") - f.write(name) - f.write(": ") - f.write(type) - f.write("\n") - - for ipt in op_proto.inputs: - __append_param__(ipt.name, ipt.comment, "list | basestr" - if ipt.multiple else "basestr") - - temp_var_prefix = \ - "This is a temporary variable. It does not have to set by user. 
" - for opt in op_proto.outputs: - __append_param__(opt.name, opt.comment if not opt.temporary else - temp_var_prefix + opt.comment, "list | basestr" - if opt.multiple else "basestr") - - for attr in op_proto.attrs: - attr_type = None - if attr.type == attribute_pb2.INT: - attr_type = "int" - elif attr.type == attribute_pb2.FLOAT: - attr_type = "float" - elif attr.type == attribute_pb2.STRING: - attr_type = "basestr" - elif attr.type == attribute_pb2.INTS: - attr_type = "list of int" - elif attr.type == attribute_pb2.FLOATS: - attr_type = "list of float" - elif attr.type == attribute_pb2.STRINGS: - attr_type = "list of basestr" - - if attr_type is None: - raise RuntimeError("Not supported attribute type " + attr.type) - - __append_param__(attr.name, attr.comment, attr_type) - - return f.getvalue() +class OpInfo(object): + def __init__(self, name, method, inputs, outputs, attrs, no_temp_outputs): + self.name = name + self.method = method + self.inputs = inputs + self.outputs = outputs + self.attrs = attrs + self.no_temp_outputs = no_temp_outputs def create_op_creation_method(op_proto): @@ -216,38 +165,57 @@ def create_op_creation_method(op_proto): opdesc = method(*args, **kwargs) return core.Operator.create(opdesc.SerializeToString()) - __impl__.__doc__ = get_docstring_from_op_proto(op_proto) - __impl__.all_input_args = [var.name for var in op_proto.inputs] - __impl__.all_output_args = [var.name for var in op_proto.outputs] - __impl__.all_attr_args = [attr.name for attr in op_proto.attrs] - __impl__.all_not_temp_output_args = [ - var.name for var in op_proto.outputs if not var.temporary - ] + return OpInfo( + method=__impl__, + name=op_proto.type, + inputs=[var.name for var in op_proto.inputs], + outputs=[var.name for var in op_proto.outputs], + attrs=[attr.name for attr in op_proto.attrs], + no_temp_outputs=[ + var.name for var in op_proto.outputs if not var.temporary + ]) - return __impl__ +class OperatorFactory(object): + def __init__(self): + self.op_methods = dict() + for op_proto in get_all_op_protos(): + method = create_op_creation_method(op_proto) + self.op_methods[method.name] = method -class OpCreationsHolder(object): - """ - A object will holds all op creation methods. - - Use `op_creations.xxx_op` to access them. - """ - pass + def __call__(self, *args, **kwargs): + if 'type' in kwargs: + if len(args) != 0: + raise ValueError("All Paddle argument should be key-word " + "argument except type") + t = kwargs.pop('type') + else: + if len(args) != 1: + raise ValueError("All Paddle argument should be key-word " + "argument except type") + t = args[0] + return self.get_op_info(t).method(**kwargs) -op_creations = OpCreationsHolder() + def types(self): + return self.op_methods.keys() + def get_op_info(self, t): + if t not in self.op_methods: + raise ValueError("operator %s is not registered", t) + return self.op_methods.get(t) -def __bootstrap__(): - """ - Bootstrap function for this module. It will dynamic create all op creation - methods in runtime. 
- """ - for op_proto in get_all_op_protos(): - func = create_op_creation_method(op_proto) - func.__name__ = str(op_proto.type) - setattr(op_creations, func.__name__, func) + def get_op_input_names(self, type): + return self.get_op_info(type).inputs + + def get_op_output_names(self, type): + return self.get_op_info(type).outputs + + def get_op_attr_names(self, type): + return self.get_op_info(type).attrs + + def get_op_no_temp_output_names(self, type): + return self.get_op_info(type).no_temp_outputs -__bootstrap__() +Operator = OperatorFactory() # Default global factory diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 7eec37678815587b451008eef587b23bcb9beeaf..10659caa882fd3d4060f9947413a392c3b681ee8 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -6,7 +6,6 @@ py_test(test_scope SRCS test_scope.py) py_test(test_tensor SRCS test_tensor.py) py_test(test_mul_op SRCS test_mul_op.py) -py_test(test_network SRCS test_network.py) py_test(test_mean_op SRCS test_mean_op.py) py_test(test_protobuf SRCS test_protobuf.py) @@ -14,10 +13,13 @@ py_test(test_protobuf SRCS test_protobuf.py) py_test(test_add_two_op SRCS test_add_two_op.py) py_test(test_sigmoid_op SRCS test_sigmoid_op.py) py_test(test_softmax_op SRCS test_softmax_op.py) +py_test(test_cross_entropy_op SRCS test_cross_entropy_op.py) +py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py) py_test(gradient_checker SRCS gradient_checker.py) py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py) py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py) -py_test(test_op_creation_methods SRCS test_op_creation_methods.py) +py_test(test_operator SRCS test_operator.py) +py_test(test_uniform_random_op SRCS test_uniform_random_op.py) diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index 4022de1c40e41aa77a7f31d82b55b63585cbd5f5..b73c4869d14a62a951d8e45dafb14b7523355519 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -1,16 +1,31 @@ -import paddle.v2.framework.core as core -from paddle.v2.framework.create_op_creation_methods import op_creations -import numpy import unittest +import numpy +import paddle.v2.framework.core as core +from paddle.v2.framework.op import Operator + __all__ = ['get_numeric_gradient'] +def create_op(op_type): + kwargs = dict() + for in_name in Operator.get_op_input_names(op_type): + kwargs[in_name] = in_name + for out_name in Operator.get_op_output_names(op_type): + kwargs[out_name] = out_name + + return Operator(op_type, **kwargs) + + +def grad_var_name(var_name): + return var_name + "@GRAD" + + def get_numeric_gradient(op, input_values, output_name, input_to_check, - delta=1e-2, + delta=0.005, local_scope=None): """ Get Numeric Gradient for an operator's input. 
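`get_numeric_gradient` estimates the output's derivative with respect to one input element at a time by perturbing that element (with the smaller default `delta` of 0.005) and re-running the forward pass; `GradientChecker.check_grad` then compares the estimate against what the backward op writes into the `@GRAD` variables. The estimator itself, stripped of the scope/operator machinery, can be sketched with a central difference over a scalar-valued function:

```python
import numpy as np

def numeric_gradient(f, x, delta=0.005):
    """Central-difference estimate of d f(x) / d x for a scalar-valued f
    and an ndarray input x, one element at a time."""
    x = x.astype("float64")          # work on a private copy
    grad = np.zeros_like(x)
    flat_x, flat_g = x.ravel(), grad.ravel()
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + delta
        pos = f(x)
        flat_x[i] = orig - delta
        neg = f(x)
        flat_x[i] = orig
        flat_g[i] = (pos - neg) / (2.0 * delta)
    return grad

# Example: f(x) = sum(x ** 2) has gradient 2 * x.
x = np.random.rand(3, 4)
g = numeric_gradient(lambda v: np.sum(v ** 2), x)
assert np.allclose(g, 2.0 * x, atol=1e-3)
```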
@@ -76,15 +91,146 @@ def get_numeric_gradient(op, return gradient_flat.reshape(tensor_to_check.get_dims()) +class GradientChecker(unittest.TestCase): + def __is_close(self, numeric_grads, scope, max_relative_error): + for name in numeric_grads: + op_grad = numpy.array( + scope.find_var(grad_var_name(name)).get_tensor()) + is_close = numpy.allclose( + numeric_grads[name], op_grad, rtol=max_relative_error, atol=100) + if not is_close: + return False + return True + + def check_grad(self, + forward_op, + input_vars, + inputs_to_check, + output_name, + no_grad_set=None, + only_cpu=False, + max_relative_error=0.005): + """ + :param forward_op: used to create backward_op + :param input_vars: numpy value of input variable. The following + computation will use these variables. + :param inputs_to_check: inputs var names that should check gradient. + :param output_name: output name that used to + :param max_relative_error: The relative tolerance parameter. + :param no_grad_set: used when create backward ops + :param only_cpu: only compute and check gradient on cpu kernel. + :return: + """ + if no_grad_set is None: + no_grad_set = set() + + tmp_outs = forward_op.temp_outputs() + no_tmp_out = filter(lambda name: name not in tmp_outs, + forward_op.outputs()) + if len(no_tmp_out) != 1: + raise ValueError("non temp out_names should be 1") + + in_names = forward_op.inputs() + for no_grad in no_grad_set: + if no_grad not in in_names: + raise ValueError("no_grad should be in in_names") + + backward_op = core.Operator.backward(forward_op, no_grad_set) + + places = [core.CPUPlace()] + if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu(): + places.append(core.GPUPlace(0)) + + numeric_grad = dict() + # get numeric gradient + for check_name in inputs_to_check: + numeric_grad[check_name] = \ + get_numeric_gradient(forward_op, input_vars, output_name, check_name) + + # get operator gradient according to different device + for place in places: + scope = core.Scope() + ctx = core.DeviceContext.create(place) + + # create input var and set value + for name, value in input_vars.iteritems(): + if name not in in_names: + raise ValueError(name + " not in op.inputs_") + var = scope.new_var(name).get_tensor() + var.set_dims(value.shape) + var.set(value, place) + + # create output var + for out_name in forward_op.outputs(): + scope.new_var(out_name).get_tensor() + + # infer the shape of output var and compute/set value of output var + forward_op.infer_shape(scope) + forward_op.run(scope, ctx) + + # create output grad var + # set shape as the output var + # set value of this grad to ones + for name in forward_op.outputs(): + out_tensor = scope.find_var(name).get_tensor() + grad_tensor = scope.new_var(grad_var_name(name)).get_tensor() + grad_tensor.set_dims(out_tensor.shape()) + data = 1.0 * numpy.ones(out_tensor.shape()) + grad_tensor.set(data, place) + + # create input grad var + for name in backward_op.outputs(): + scope.new_var(name).get_tensor() + + # infer the shape of input gradient var and compute/set it's value + # with backward op + backward_op.infer_shape(scope) + backward_op.run(scope, ctx) + + if isinstance(place, core.CPUPlace): + msg = "CPU kernel gradient is not close to numeric gradient" + else: + if isinstance(place, core.GPUPlace): + msg = "GPU kernel gradient is not close to numeric gradient" + else: + raise ValueError("unknown place " + type(place)) + self.assertTrue( + self.__is_close(numeric_grad, scope, max_relative_error), msg) + + if __name__ == '__main__': class 
GetNumericGradientTest(unittest.TestCase): def test_add_op(self): - add_op = op_creations.add_two(X="X", Y="Y", Out="Z") + add_op = Operator('add_two', X="X", Y="Y", Out="Z") x = numpy.random.random((10, 1)).astype("float32") y = numpy.random.random((10, 1)).astype("float32") arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X') self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-2) + def test_softmax_op(self): + def stable_softmax(x): + """Compute the softmax of vector x in a numerically stable way.""" + shiftx = x - numpy.max(x) + exps = numpy.exp(shiftx) + return exps / numpy.sum(exps) + + def label_softmax_grad(Y, dY): + dX = Y * 0.0 + for i in range(Y.shape[0]): + d = numpy.dot(Y[i, :], dY[i, :]) + dX[i, :] = Y[i, :] * (dY[i, :] - d) + return dX + + softmax_op = Operator("softmax", X="X", Y="Y") + + X = numpy.random.random((2, 2)).astype("float32") + Y = numpy.apply_along_axis(stable_softmax, 1, X) + dY = numpy.ones(Y.shape) + dX = label_softmax_grad(Y, dY) + + arr = get_numeric_gradient(softmax_op, {"X": X}, 'Y', 'X') + numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2) + unittest.main() diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py index e6bc7d8a9b5ddd4582a5ef8a47cb63a7e5911892..dd65e0f2dc23d3f657ff16c55fb297dae210b2d7 100644 --- a/python/paddle/v2/framework/tests/op_test_util.py +++ b/python/paddle/v2/framework/tests/op_test_util.py @@ -1,7 +1,6 @@ -import paddle.v2.framework.core as core -import unittest import numpy -import paddle.v2.framework.create_op_creation_methods as creation +import paddle.v2.framework.core as core +from paddle.v2.framework.op import Operator class OpTestMeta(type): @@ -21,18 +20,14 @@ class OpTestMeta(type): obj = super(OpTestMeta, cls).__new__(cls, name, bases, attrs) def test_all(self): - func = getattr(creation.op_creations, self.type, None) - self.assertIsNotNone(func) - scope = core.Scope() kwargs = dict() - places = [] - places.append(core.CPUPlace()) + places = [core.CPUPlace()] if core.is_compile_gpu(): places.append(core.GPUPlace(0)) for place in places: - for in_name in func.all_input_args: + for in_name in Operator.get_op_input_names(self.type): if hasattr(self, "inputs") and in_name in self.inputs: kwargs[in_name] = in_name var = scope.new_var(in_name).get_tensor() @@ -42,7 +37,7 @@ class OpTestMeta(type): else: kwargs[in_name] = "@EMPTY@" - for out_name in func.all_output_args: + for out_name in Operator.get_op_output_names(self.type): if not hasattr(self, "outputs"): raise ValueError( "The test op must set self.outputs dict.") @@ -52,21 +47,25 @@ class OpTestMeta(type): kwargs[out_name] = out_name scope.new_var(out_name).get_tensor() - for attr_name in func.all_attr_args: + for attr_name in Operator.get_op_attr_names(self.type): if hasattr(self, "attrs") and attr_name in self.attrs: kwargs[attr_name] = self.attrs[attr_name] - op = func(**kwargs) + op = Operator(self.type, **kwargs) + if isinstance(place, core.GPUPlace) and not op.support_gpu(): + return op.infer_shape(scope) ctx = core.DeviceContext.create(place) op.run(scope, ctx) - for out_name in func.all_output_args: + for out_name in Operator.get_op_output_names(self.type): actual = numpy.array(scope.find_var(out_name).get_tensor()) expect = self.outputs[out_name] - numpy.isclose(actual, expect) + self.assertTrue( + numpy.allclose(actual, expect), + "output name: " + out_name + "has diff") obj.test_all = test_all return obj diff --git a/python/paddle/v2/framework/tests/test_add_two_op.py 
b/python/paddle/v2/framework/tests/test_add_two_op.py index 8ef48f4727b0af46a696c6f463045d98e7a08800..c0237830647371e14b755953345965a3eac7bfd2 100644 --- a/python/paddle/v2/framework/tests/test_add_two_op.py +++ b/python/paddle/v2/framework/tests/test_add_two_op.py @@ -2,7 +2,7 @@ import unittest import numpy import paddle.v2.framework.core as core -import paddle.v2.framework.create_op_creation_methods as creation +from paddle.v2.framework.op import Operator from op_test_util import OpTestMeta @@ -21,7 +21,7 @@ class TestAddOp(unittest.TestCase): class TestAddGradOp(unittest.TestCase): def test_add_grad(self): - op = creation.op_creations.add_two(X="X", Y="Y", Out="Out") + op = Operator('add_two', X="X", Y="Y", Out="Out") backward_op = core.Operator.backward(op, set()) self.assertEqual(backward_op.type(), "add_two_grad") expected = '''Op(add_two_grad), inputs:(X, Y, Out, Out@GRAD), outputs:(X@GRAD, Y@GRAD).''' diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py index b26e25d58b59bd1cb16e9ba2a1cccd27799b15f2..4815192e255c6e0429db3f50918a76a773b30131 100644 --- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py @@ -1,9 +1,10 @@ import unittest import numpy from op_test_util import OpTestMeta +from gradient_checker import GradientChecker, create_op -class TestSGD(unittest.TestCase): +class TestCrossEntropy(unittest.TestCase): __metaclass__ = OpTestMeta def setUp(self): @@ -20,7 +21,18 @@ class TestSGD(unittest.TestCase): self.outputs = {'Y': numpy.array(Y).astype("float32")} -# TODO(superjom) add gradient check +class CrossEntropyGradOpTest(GradientChecker): + def test_softmax_grad(self): + op = create_op("onehot_cross_entropy") + batch_size = 100 + class_num = 10 + inputs = { + "X": numpy.random.uniform( + 0.1, 1.0, [batch_size, class_num]).astype("float32"), + "label": (class_num / 2) * numpy.ones(batch_size).astype("int32") + } + self.check_grad(op, inputs, set("X"), "Y") + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 00dc4399aaf59e6382692c3a4356f89a7e79a0c5..e24435839d305bb1a4ab7daa3e9684a421468fd8 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -1,7 +1,7 @@ import paddle.v2.framework.core as core import unittest import numpy -import paddle.v2.framework.create_op_creation_methods as creation +from paddle.v2.framework.op import Operator class TestFc(unittest.TestCase): @@ -24,7 +24,7 @@ class TestFc(unittest.TestCase): # Set a real numpy array here. 
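The new `CrossEntropyGradOpTest` drives the generic gradient checker against `onehot_cross_entropy`. For reference, under the usual one-hot cross-entropy definition assumed here, the value being differentiated is just the negative log of the probability selected by each label — a NumPy sketch, not the operator itself:

```python
import numpy as np

def onehot_cross_entropy(X, label):
    """Per-row negative log likelihood of the labelled class.
    X: (batch_size, class_num) probabilities; label: (batch_size,) ints."""
    batch_size = X.shape[0]
    return -np.log(X[np.arange(batch_size), label])

batch_size, class_num = 100, 10
X = np.random.uniform(0.1, 1.0, [batch_size, class_num]).astype("float32")
label = (class_num // 2) * np.ones(batch_size).astype("int32")

Y = onehot_cross_entropy(X, label)
assert Y.shape == (batch_size,)
assert (Y >= 0).all()   # probabilities are at most 1, so losses are non-negative
```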
# x_tensor.set(numpy.array([])) - op = creation.op_creations.fc(X="X", Y="Y", W="W") + op = Operator("fc", X="X", Y="Y", W="W") for out in op.outputs(): if scope.find_var(out) is None: diff --git a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py new file mode 100644 index 0000000000000000000000000000000000000000..e5c862605fb11a5ea1426cf8f9054589dc377ff1 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py @@ -0,0 +1,16 @@ +import unittest +from op_test_util import OpTestMeta +import numpy + + +class TestFillZerosLikeOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "fill_zeros_like" + self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")} + self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])} + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/framework/tests/test_net.py index db776d6b643dc4014da9f5dded8219180af639e3..b30896553dea4a4929038d524b23c6090bbed380 100644 --- a/python/paddle/v2/framework/tests/test_net.py +++ b/python/paddle/v2/framework/tests/test_net.py @@ -1,16 +1,16 @@ import paddle.v2.framework.core as core -from paddle.v2.framework.create_op_creation_methods import op_creations +from paddle.v2.framework.op import Operator import unittest class TestNet(unittest.TestCase): def test_net_all(self): net = core.Net.create() - op1 = op_creations.add_two(X="X", Y="Y", Out="Out") + op1 = Operator("add_two", X="X", Y="Y", Out="Out") net.add_op(op1) net2 = core.Net.create() - net2.add_op(op_creations.fc(X="X", W="w", Y="fc.out")) + net2.add_op(Operator("fc", X="X", W="w", Y="fc.out")) net2.complete_add_op(True) net.add_op(net2) net.complete_add_op(True) diff --git a/python/paddle/v2/framework/tests/test_network.py b/python/paddle/v2/framework/tests/test_network.py deleted file mode 100644 index 6d53e233e959bd39b558ac97cdca381135505f8d..0000000000000000000000000000000000000000 --- a/python/paddle/v2/framework/tests/test_network.py +++ /dev/null @@ -1,32 +0,0 @@ -from paddle.v2.framework.network import Network -import paddle.v2.framework.core as core -import unittest - - -class TestNet(unittest.TestCase): - def test_net_all(self): - net = Network() - out = net.add_two(X="X", Y="Y") - fc_out = net.fc(X=out, W="w") - net.complete_add_op() - self.assertTrue(isinstance(fc_out, core.Variable)) - self.assertEqual( - '''Op(plain_net), inputs:(@EMPTY@, X, Y, w), outputs:(@TEMP@fc@0, add_two@OUT@0, fc@OUT@1). - Op(add_two), inputs:(X, Y), outputs:(add_two@OUT@0). - Op(fc), inputs:(add_two@OUT@0, w, @EMPTY@), outputs:(fc@OUT@1, @TEMP@fc@0). - Op(mul), inputs:(add_two@OUT@0, w), outputs:(@TEMP@fc@0). - Op(sigmoid), inputs:(@TEMP@fc@0), outputs:(fc@OUT@1). -''', str(net)) - - net2 = Network() - tmp = net2.add_two(X="X", Y="Y") - self.assertTrue(isinstance(tmp, core.Variable)) - net2.complete_add_op() - self.assertEqual( - '''Op(plain_net), inputs:(X, Y), outputs:(add_two@OUT@2). - Op(add_two), inputs:(X, Y), outputs:(add_two@OUT@2). 
-''', str(net2)) - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/v2/framework/tests/test_op_creation_methods.py b/python/paddle/v2/framework/tests/test_operator.py similarity index 64% rename from python/paddle/v2/framework/tests/test_op_creation_methods.py rename to python/paddle/v2/framework/tests/test_operator.py index 1d2ce6d0717bfb45355fe0cabc516a598492d518..4f164e1a69e3fd0409f9b575a8bd9b4e423b486b 100644 --- a/python/paddle/v2/framework/tests/test_op_creation_methods.py +++ b/python/paddle/v2/framework/tests/test_operator.py @@ -1,5 +1,5 @@ import unittest -import paddle.v2.framework.create_op_creation_methods as creation +import paddle.v2.framework.op as op import paddle.v2.framework.core as core import paddle.v2.framework.proto.op_proto_pb2 as op_proto_pb2 import paddle.v2.framework.proto.op_desc_pb2 as op_desc_pb2 @@ -8,7 +8,7 @@ import paddle.v2.framework.proto.attribute_pb2 as attribute_pb2 class TestGetAllProtos(unittest.TestCase): def test_all(self): - all_protos = creation.get_all_op_protos() + all_protos = op.get_all_op_protos() self.assertNotEqual(0, len(all_protos)) for each in all_protos: @@ -17,25 +17,25 @@ class TestGetAllProtos(unittest.TestCase): class TestOpDescCreationMethod(unittest.TestCase): def test_plain_input_output(self): - op = op_proto_pb2.OpProto() - op.type = "test" - ipt = op.inputs.add() + op_proto = op_proto_pb2.OpProto() + op_proto.type = "test" + ipt = op_proto.inputs.add() ipt.name = "X" ipt.comment = "not matter" - ipt = op.inputs.add() + ipt = op_proto.inputs.add() ipt.name = "Y" ipt.comment = "not matter" - opt = op.outputs.add() + opt = op_proto.outputs.add() opt.name = "Z" opt.comment = "not matter" - op.comment = "not matter" + op_proto.comment = "not matter" - self.assertTrue(op.IsInitialized()) + self.assertTrue(op_proto.IsInitialized()) - method = creation.OpDescCreationMethod(op) + method = op.OpDescCreationMethod(op_proto) output = method(X="a", Y="b", Z="c") expected = op_desc_pb2.OpDesc() @@ -45,29 +45,29 @@ class TestOpDescCreationMethod(unittest.TestCase): self.assertEqual(expected, output) def test_multiple_input_plain_output(self): - op = op_proto_pb2.OpProto() - op.type = "fc" - ipt = op.inputs.add() + op_proto = op_proto_pb2.OpProto() + op_proto.type = "fc" + ipt = op_proto.inputs.add() ipt.name = "X" ipt.comment = "" ipt.multiple = True - ipt = op.inputs.add() + ipt = op_proto.inputs.add() ipt.name = "W" ipt.comment = "" ipt.multiple = True - ipt = op.inputs.add() + ipt = op_proto.inputs.add() ipt.name = "b" ipt.comment = "" - out = op.outputs.add() + out = op_proto.outputs.add() out.name = "Y" out.comment = "" - op.comment = "" - self.assertTrue(op.IsInitialized()) - method = creation.OpDescCreationMethod(op) + op_proto.comment = "" + self.assertTrue(op_proto.IsInitialized()) + method = op.OpDescCreationMethod(op_proto) generated1 = method(X="x", W="w", b="b", Y="y") expected1 = op_desc_pb2.OpDesc() @@ -93,14 +93,14 @@ class TestOpDescCreationMethod(unittest.TestCase): self.assertEqual(expected2, generated2) def test_attrs(self): - op = op_proto_pb2.OpProto() - op.type = "test" - ipt = op.inputs.add() + op_proto = op_proto_pb2.OpProto() + op_proto.type = "test" + ipt = op_proto.inputs.add() ipt.name = 'X' ipt.comment = "" def __add_attr__(name, type): - attr = op.attrs.add() + attr = op_proto.attrs.add() attr.name = name attr.comment = "" attr.type = type @@ -112,10 +112,10 @@ class TestOpDescCreationMethod(unittest.TestCase): __add_attr__("floats_attr", attribute_pb2.FLOATS) 
__add_attr__("strings_attr", attribute_pb2.STRINGS) - op.comment = "" - self.assertTrue(op.IsInitialized()) + op_proto.comment = "" + self.assertTrue(op_proto.IsInitialized()) - method = creation.OpDescCreationMethod(op) + method = op.OpDescCreationMethod(op_proto) generated = method( X="a", @@ -162,23 +162,23 @@ class TestOpDescCreationMethod(unittest.TestCase): self.assertEqual(expected, generated) def test_input_temporary_output(self): - op = op_proto_pb2.OpProto() - op.type = "test" - out = op.outputs.add() + op_proto = op_proto_pb2.OpProto() + op_proto.type = "test" + out = op_proto.outputs.add() out.name = "OUT" out.comment = "" - out = op.outputs.add() + out = op_proto.outputs.add() out.name = "TMP" out.comment = "" out.temporary = True - out = op.outputs.add() + out = op_proto.outputs.add() out.name = "OUT2" out.comment = "" - op.comment = "" + op_proto.comment = "" - method = creation.OpDescCreationMethod(op) + method = op.OpDescCreationMethod(op_proto) generated = method(OUT="a", OUT2="b") desc = op_desc_pb2.OpDesc() desc.outputs.extend(["a", core.var_names.temp(), "b"]) @@ -190,60 +190,9 @@ class TestOpDescCreationMethod(unittest.TestCase): self.assertEqual(generated, desc) -class TestOpCreationDocStr(unittest.TestCase): - def test_all(self): - op = op_proto_pb2.OpProto() - op.type = "test" - op.comment = """Test Op. - -This op is used for unit test, not a real op. -""" - a = op.inputs.add() - a.name = "a" - a.comment = "Input a for test op" - a.multiple = True - - b = op.inputs.add() - b.name = "b" - b.comment = "Input b for test op" - self.assertTrue(op.IsInitialized()) - - o1 = op.outputs.add() - o1.name = "output" - o1.comment = "The output of test op" - - o2 = op.outputs.add() - o2.name = "temp output" - o2.comment = "The temporary output of test op" - o2.temporary = True - - test_str = op.attrs.add() - test_str.name = "str_attr" - test_str.type = attribute_pb2.STRING - test_str.comment = "A string attribute for test op" - - actual = creation.get_docstring_from_op_proto(op) - expected_docstring = '''Test Op. - -This op is used for unit test, not a real op. - -:param a: Input a for test op -:type a: list | basestr -:param b: Input b for test op -:type b: basestr -:param output: The output of test op -:type output: basestr -:param temp output: This is a temporary variable. It does not have to set by user. 
The temporary output of test op -:type temp output: basestr -:param str_attr: A string attribute for test op -:type str_attr: basestr -''' - self.assertEqual(expected_docstring, actual) - - class TestOpCreations(unittest.TestCase): def test_all(self): - add_op = creation.op_creations.add_two(X="a", Y="b", Out="z") + add_op = op.Operator("add_two", X="a", Y="b", Out="z") self.assertIsNotNone(add_op) # Invoke C++ DebugString() self.assertEqual('Op(add_two), inputs:(a, b), outputs:(z).', diff --git a/python/paddle/v2/framework/tests/test_softmax_op.py b/python/paddle/v2/framework/tests/test_softmax_op.py index 98ca8ddc860c3825411b02b2f6ed612db46a18d7..e670d93653e07d35e5019c9daac45c214eddf367 100644 --- a/python/paddle/v2/framework/tests/test_softmax_op.py +++ b/python/paddle/v2/framework/tests/test_softmax_op.py @@ -1,9 +1,8 @@ import unittest import numpy as np -import paddle.v2.framework.core as core -import paddle.v2.framework.create_op_creation_methods as creation +from gradient_checker import GradientChecker, create_op from op_test_util import OpTestMeta @@ -25,62 +24,11 @@ class TestSoftmaxOp(unittest.TestCase): } -class TestSoftmaxGradOp(unittest.TestCase): - def test_softmax_grad(self): - op = creation.op_creations.softmax(X="X", Y="Y") - backward_op = core.Operator.backward(op, set()) - self.assertEqual(backward_op.type(), "softmax_grad") - expected = '''Op(softmax_grad), inputs:(X, Y, Y@GRAD), outputs:(X@GRAD).''' - self.assertEqual(expected, str(backward_op)) - - batch_size = 3 - class_num = 5 - # Initialize X and add 1e-2 for numerical stability - Y = np.random.rand(batch_size, class_num).astype(np.float32) - Y = Y + 1e-2 - dY = np.random.rand(batch_size, class_num).astype(np.float32) - - # Reference implementation of cross entropy with soft labels - def label_softmax_grad(Y, dY): - dX = Y * 0.0 - for i in range(batch_size): - d = np.dot(Y[i, :], dY[i, :]) - dX[i, :] = Y[i, :] * (dY[i, :] - d) - return dX - - expected = label_softmax_grad(Y, dY) - - scope = core.Scope() - places = [] - places.append(core.CPUPlace()) - if core.is_compile_gpu(): - places.append(core.GPUPlace(0)) - - for place in places: - y = scope.new_var("Y") - y_tensor = y.get_tensor() - y_tensor.set_dims([batch_size, class_num]) - y_tensor.alloc_float(place) - y_tensor.set(Y, place) - - dy = scope.new_var("Y@GRAD") - dy_tensor = dy.get_tensor() - dy_tensor.set_dims([batch_size, class_num]) - dy_tensor.alloc_float(place) - dy_tensor.set(dY, place) - - x = scope.new_var("X") - dx = scope.new_var("X@GRAD") - - tensor = scope.find_var("X@GRAD").get_tensor() - backward_op.infer_shape(scope) - self.assertEqual([batch_size, class_num], tensor.shape()) - - ctx = core.DeviceContext.create(place) - backward_op.run(scope, ctx) - actual = np.array(tensor) - - np.testing.assert_almost_equal(actual, expected, decimal=3) +class SoftmaxGradOpTest(GradientChecker): + def test_softmax(self): + op = create_op("softmax") + inputs = {"X": np.random.uniform(0.1, 1, [10, 10]).astype("float32")} + self.check_grad(op, inputs, set("X"), "Y") if __name__ == '__main__': diff --git a/python/paddle/v2/framework/tests/test_uniform_random_op.py b/python/paddle/v2/framework/tests/test_uniform_random_op.py new file mode 100644 index 0000000000000000000000000000000000000000..c3d2bb44da3977c0899b2609a8efe15b7e1789f2 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_uniform_random_op.py @@ -0,0 +1,35 @@ +import unittest +from paddle.v2.framework.op import Operator +import paddle.v2.framework.core as core +import numpy + + +class 
UniformRandomTest(unittest.TestCase): + def test_uniform_random_cpu(self): + self.uniform_random_test(place=core.CPUPlace()) + + def test_uniform_random_gpu(self): + if core.is_compile_gpu(): + self.uniform_random_test(place=core.GPUPlace(0)) + + def uniform_random_test(self, place): + scope = core.Scope() + scope.new_var("X").get_tensor() + + op = Operator( + "uniform_random", + Out="X", + dims=[1000, 784], + min=-5.0, + max=10.0, + seed=10) + + op.infer_shape(scope) + ctx = core.DeviceContext.create(place) + op.run(scope, ctx) + tensor = numpy.array(scope.find_var("X").get_tensor()) + self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/setup.py.in b/python/setup.py.in index 7808238aa6ba5ca5c13292638f1c513f87cc2af2..38f0a503bee3eb29ae3c893c96d6e333be54b96e 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -1,4 +1,8 @@ -from setuptools import setup +from setuptools import setup, Distribution + +class BinaryDistribution(Distribution): + def has_ext_modules(foo): + return True packages=['paddle', 'paddle.proto', @@ -11,7 +15,8 @@ packages=['paddle', 'paddle.v2.master', 'paddle.v2.plot', 'paddle.v2.framework', - 'paddle.v2.framework.proto'] + 'paddle.v2.framework.proto', + 'py_paddle'] setup_requires=["requests", "numpy>=1.12", @@ -21,23 +26,33 @@ setup_requires=["requests", "rarfile", "scipy>=0.19.0", "Pillow", - "nltk"] + "nltk>=3.2.2"] if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: setup_requires+=["opencv-python"] -setup(name='paddle', +setup(name='paddlepaddle', version='${PADDLE_VERSION}', description='Parallel Distributed Deep Learning', install_requires=setup_requires, packages=packages, - package_data={'paddle.v2.master': ['libpaddle_master.so'], - 'paddle.v2.framework': ['core.so'] + package_data={ + 'paddle.v2.master': ['libpaddle_master.so'], + 'paddle.v2.framework': ['core.so'], + 'py_paddle':['*.py','_swig_paddle.so'] }, package_dir={ '': '${CMAKE_CURRENT_SOURCE_DIR}', # The paddle.v2.framework.proto will be generated while compiling. # So that package points to other directory. - 'paddle.v2.framework.proto': '${PROJ_BINARY_ROOT}/paddle/framework' + 'paddle.v2.framework.proto': '${PROJ_BINARY_ROOT}/paddle/framework', + 'py_paddle': '${PROJ_ROOT}/paddle/py_paddle' }, + scripts=['${PROJ_BINARY_ROOT}/paddle/scripts/paddle'], + distclass=BinaryDistribution, + data_files=[('/usr/local/opt/paddle/bin', + ['${PROJ_BINARY_ROOT}/paddle/scripts/paddle_usage', + '${PROJ_BINARY_ROOT}/paddle/trainer/paddle_trainer', + '${PROJ_BINARY_ROOT}/paddle/trainer/paddle_merge_model', + '${PROJ_BINARY_ROOT}/paddle/pserver/paddle_pserver_main'])] )
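
Note on the new `Operator` interface exercised throughout these test changes: the old `op_creations.<op_type>(...)` helpers are replaced by a single constructor that takes the op type name plus keyword inputs, outputs, and attributes. Below is a minimal sketch of the resulting test pattern, assembled only from calls the updated and removed tests in this diff already use (`Operator`, `Scope.new_var`, `set_dims`, `alloc_float`, `set`, `infer_shape`, `DeviceContext.create`, `run`); the `add_two` example, the `[2, 3]` shape, and the variable names are illustrative assumptions, not part of this change.

```python
import numpy
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator

scope = core.Scope()
place = core.CPUPlace()

# Feed two input tensors into the scope (the [2, 3] shape is an arbitrary example).
for name in ("X", "Y"):
    tensor = scope.new_var(name).get_tensor()
    tensor.set_dims([2, 3])
    tensor.alloc_float(place)
    tensor.set(numpy.ones((2, 3)).astype("float32"), place)
scope.new_var("Out").get_tensor()

# The op is now created by type name, replacing op_creations.add_two(...).
op = Operator("add_two", X="X", Y="Y", Out="Out")
op.infer_shape(scope)
op.run(scope, core.DeviceContext.create(place))

result = numpy.array(scope.find_var("Out").get_tensor())
print(result)  # expected to be all 2.0 if add_two sums its inputs element-wise
```

The same pattern underlies the new `test_uniform_random_op.py` and the `GradientChecker`-based softmax test above: construct the operator by name, let it infer shapes against a scope, then run it under a CPU or GPU device context and inspect the output tensors with numpy.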