diff --git a/Dockerfile b/Dockerfile index 156ad3552b2c4ff90b405c35c66d44117c2624a4..06a3d8930769bca2599a7afedb3683b2207cb302 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,17 +38,16 @@ RUN apt-get update && \ RUN pip --no-cache-dir install 'numpy>=1.12.0' # Install Go and glide -RUN wget -O go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz && \ - tar -C /usr/local -xzf go.tgz && \ +RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \ + tar -xz -C /usr/local && \ mkdir /root/gopath && \ mkdir /root/gopath/bin && \ - mkdir /root/gopath/src && \ - rm go.tgz + mkdir /root/gopath/src ENV GOROOT=/usr/local/go GOPATH=/root/gopath # should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT. ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin # install glide -RUN curl -q https://glide.sh/get | sh +RUN curl -s -q https://glide.sh/get | sh # git credential to skip password typing RUN git config --global credential.helper store diff --git a/cmake/external/any.cmake b/cmake/external/any.cmake index 5d2f7219b2007493916a39e839d647a9d0046c9f..85cce80b70a1fcf57015ac7a264e4950616b2717 100644 --- a/cmake/external/any.cmake +++ b/cmake/external/any.cmake @@ -8,7 +8,7 @@ ExternalProject_Add( extern_lib_any ${EXTERNAL_PROJECT_LOG_ARGS} GIT_REPOSITORY "https://github.com/PaddlePaddle/any.git" - GIT_TAG "8fef1e93710a0edf8d7658999e284a1142c4c020" + GIT_TAG "15595d8324be9e8a9a80d9ae442fdd12bd66df5d" PREFIX ${ANY_SOURCE_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index 17a1ca4ed04dce85ae3c7fdd5f22d6eeed03db59..e9fd3d4bedc983ae7c544cf289dc841cf22f9de4 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -17,7 +17,7 @@ IF(NOT ${WITH_MKLML}) ENDIF(NOT ${WITH_MKLML}) IF(WIN32 OR APPLE) - MESSAGE(WARNING + MESSAGE(WARNING "Windows or Mac is not supported with MKLML in Paddle yet." "Force WITH_MKLML=OFF") SET(WITH_MKLML OFF CACHE STRING "Disable MKLML package in Windows and MacOS" FORCE) @@ -43,22 +43,21 @@ SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${MKLML_ROOT}/lib") INCLUDE_DIRECTORIES(${MKLML_INC_DIR}) -SET(mklml_cmakefile ${MKLML_DOWNLOAD_DIR}/CMakeLists.txt) -FILE(WRITE ${mklml_cmakefile} "PROJECT(MKLML)\n" - "cmake_minimum_required(VERSION 3.0)\n" - "install(DIRECTORY ${MKLML_VER}\n" - " DESTINATION ${MKLML_DST_DIR})\n") +FILE(WRITE ${MKLML_DOWNLOAD_DIR}/CMakeLists.txt + "PROJECT(MKLML)\n" + "cmake_minimum_required(VERSION 3.0)\n" + "install(DIRECTORY ${MKLML_VER}\n" + " DESTINATION ${MKLML_DST_DIR})\n") ExternalProject_Add( ${MKLML_PROJECT} ${EXTERNAL_PROJECT_LOG_ARGS} PREFIX ${MKLML_SOURCE_DIR} DOWNLOAD_DIR ${MKLML_DOWNLOAD_DIR} - DOWNLOAD_COMMAND wget --no-check-certificate -O ${MKLML_DOWNLOAD_DIR}/${MKLML_VER}.tgz ${MKLML_URL} - && tar -xzf ${MKLML_DOWNLOAD_DIR}/${MKLML_VER}.tgz + DOWNLOAD_COMMAND wget --no-check-certificate -qO- ${MKLML_URL} | tar xz -C ${MKLML_DOWNLOAD_DIR} DOWNLOAD_NO_PROGRESS 1 UPDATE_COMMAND "" - CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLML_INSTALL_ROOT} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLML_INSTALL_ROOT} CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${MKLML_INSTALL_ROOT} ) diff --git a/doc/design/releasing_process.md b/doc/design/releasing_process.md index 3692a5248a355cfcfd1cfd0911d43d65166921b1..0c10e782808ca6456347ec54cb5e921162731ede 100644 --- a/doc/design/releasing_process.md +++ b/doc/design/releasing_process.md @@ -11,6 +11,15 @@ Paddle每次发新的版本,遵循以下流程: * 编译这个版本的Ubuntu Deb包。如果失败,修复Ubuntu Deb包编译问题,Patch号加一,返回第二步。 * 使用Regression Test List作为检查列表,测试Docker镜像/ubuntu安装包的功能正确性 * 如果失败,记录下所有失败的例子,在这个`release/版本号`分支中,修复所有bug后,Patch号加一,返回第二步 + * 编译这个版本的python wheel包,并发布到pypi。 + * 由于pypi.python.org目前遵循[严格的命名规范 PEP 513](https://www.python.org/dev/peps/pep-0513),在使用twine上传之前,需要重命名wheel包中platform相关的后缀,比如将`linux_x86_64`修改成`manylinux1_x86_64`。 + * pypi上的package名称为paddlepaddle和paddlepaddle_gpu,如果要上传GPU版本的包,需要将build/python/setup.py中的name修改为"paddlepaddle_gpu",并重新打包wheel包:`python setup.py bdist_wheel`。 + * 上传方法: + ``` + cd build/python + pip install twine + twine upload dist/[package to upload] + ``` 4. 第三步完成后,将`release/版本号`分支合入master分支,并删除`release/版本号`分支。将master分支的合入commit打上tag,tag为`版本号`。同时再将`master`分支合入`develop`分支。最后删除`release/版本号`分支。 5. 编译master分支的Docker发行镜像,发布到dockerhub。编译ubuntu的deb包,发布到github release页面 6. 协同完成Release Note的书写
diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst index 87c286a1af75e08313813f1373ea03b85d4af523..02b96bb413156786db6dc77696c5640b97c10aa4 100644 --- a/doc/getstarted/build_and_install/docker_install_cn.rst +++ b/doc/getstarted/build_and_install/docker_install_cn.rst @@ -3,6 +3,43 @@ PaddlePaddle的Docker容器使用方式 PaddlePaddle目前唯一官方支持的运行的方式是Docker容器。因为Docker能在所有主要操作系统(包括Linux,Mac OS X和Windows)上运行。 请注意,您需要更改 `Dockers设置 `_ 才能充分利用Mac OS X和Windows上的硬件资源。 +Docker使用入门 +------------------------------ + +几个基础的概念帮助理解和使用Docker: + +- *镜像*:一个Docker镜像是一个打包好的软件。它包含了这个软件本身和它所依赖的运行环境。PaddlePaddle的Docker镜像就包含了PaddlePaddle的Python库以及其依赖的多个Python库。这样我们可以直接在Docker中运行需要的程序而不需要安装后再执行。可以执行: + + .. code-block:: bash + + docker images + + 来列出当前系统中的所有镜像,同样可以执行: + + .. code-block:: bash + + docker pull paddlepaddle/paddle:0.10.0 + + 来下载Docker镜像,paddlepaddle/paddle是从官方镜像源Dockerhub.com下载的,推荐国内用户使用docker.paddlepaddle.org/paddle下载。 + +- *容器*: 如果说一个Docker镜像就是一个程序,那容器就是这个程序运行时产生的“进程”。 + 实际上,一个容器就是一个操作系统的进程,但是是运行在独立的进程空间,文件系统以及网络之上。 + 可以执行: + + .. code-block:: bash + + docker run paddlepaddle/paddle:0.10.0 + + 来使用一个镜像启动一个容器。 + +- 默认情况下,Docker容器会运行在独立的文件系统空间之上,我们无法在Docker容器中 + 访问到主机上的文件。可以通过 *挂载Volume* 的方式,将主机上的文件或目录挂载到 + Docker容器中。下面的命令把当前目录挂载到了容器中的 /data 目录下,容器使用 + debian镜像,并且启动后执行 :code:`ls /data`。 + + .. code-block:: bash + + docker run --rm -v $(pwd):/data debian ls /data PaddlePaddle发布的Docker镜像使用说明 ------------------------------ @@ -12,11 +49,11 @@ PaddlePaddle需要的所有编译工具。把编译出来的PaddlePaddle也打 像,称为生产镜像,里面涵盖了PaddlePaddle运行所需的所有环境。每次 PaddlePaddle发布新版本的时候都会发布对应版本的生产镜像以及开发镜像。运 行镜像包括纯CPU版本和GPU版本以及其对应的非AVX版本。我们会在 -`dockerhub.com `_ 提供最新 -的Docker镜像,可以在"tags"标签下找到最新的Paddle镜像版本。为了方便在国 -内的开发者下载Docker镜像,我们提供了国内的镜像服务器供大家使用。如果您 -在国内,请把文档里命令中的paddlepaddle/paddle替换成 -docker.paddlepaddle.org/paddle。 +`dockerhub.com `_ +和国内镜像 `docker.paddlepaddle.org` 提供最新 +的Docker镜像,可以在"tags"标签下找到最新的Paddle镜像版本。 + +**注意:为了方便在国内的开发者下载Docker镜像,我们提供了国内的镜像服务器供大家使用。如果您在国内,请把文档里命令中的paddlepaddle/paddle替换成docker.paddlepaddle.org/paddle。** 1. 开发镜像::code:`paddlepaddle/paddle:0.10.0-dev` @@ -68,6 +105,8 @@ docker.paddlepaddle.org/paddle。 如果输出是No,就需要选择使用no-AVX的镜像 + **注:在0.10.0之后的版本,PaddlePaddle都可以自动判断硬件是否支持AVX,所以无需判断AVX即可使用** + 以上方法在GPU镜像里也能用,只是请不要忘记提前在物理机上安装GPU最新驱动。 为了保证GPU驱动能够在镜像里面正常运行,我们推荐使用[nvidia-docker](https://github.com/NVIDIA/nvidia-docker)来运行镜像。 diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/getstarted/build_and_install/docker_install_en.rst index b6fd3329b273aabe80edd5f1ff064a311648b3c2..94860240f6a4a9bed8a865684a8a79960489280e 100644 --- a/doc/getstarted/build_and_install/docker_install_en.rst +++ b/doc/getstarted/build_and_install/docker_install_en.rst @@ -63,12 +63,35 @@ CPU-only version and a CUDA GPU version and their no-AVX versions. We put the docker images on `dockerhub.com `_. You can find the -latest versions under "tags" tab at dockerhub.com. If you are in -China, you can use our Docker image registry mirror to speed up the -download process. To use it, please replace all paddlepaddle/paddle in -the commands to docker.paddlepaddle.org/paddle. +latest versions under "tags" tab at dockerhub.com. -1. Production images, this image might have multiple variants: +**NOTE: If you are in China, you can use our Docker image registry mirror to speed up the download process. To use it, please replace all paddlepaddle/paddle in the commands with docker.paddlepaddle.org/paddle.** + + +1. Development image :code:`paddlepaddle/paddle:-dev` + + This image packs the development tools and runtime + environment. Users and developers can use this image instead of + their own local computer to accomplish development, build, + releasing, document writing etc. Since different versions of Paddle + may depend on different versions of libraries and tools, you + must pay attention to these versions if you want to set up a + local environment. The development image contains: + + - gcc/clang + - nvcc + - Python + - sphinx + - woboq + - sshd + + Many developers work on servers with GPUs; they can use ssh to log in to + the server and run :code:`docker exec` to enter the docker + container and start their work. They can also start a development + docker image with the SSHD service enabled, so they can log in to + the container and start work. + +2. Production images, this image might have multiple variants: - GPU/AVX::code:`paddlepaddle/paddle:-gpu` - GPU/no-AVX::code:`paddlepaddle/paddle:-gpu-noavx`
@@ -84,7 +107,7 @@ the commands to docker.paddlepaddle.org/paddle. if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi - + **NOTE: versions after 0.10.0 will automatically detect system AVX support, so manual detection is not needed in this case.** To run the CPU-only image as an interactive container: .. code-block:: bash @@ -103,29 +126,6 @@ the commands to docker.paddlepaddle.org/paddle. nvidia-docker run -it --rm paddlepaddle/paddle:0.10.0-gpu /bin/bash -2. development image :code:`paddlepaddle/paddle:-dev` - - This image has packed related develop tools and runtime - environment. Users and developers can use this image instead of - their own local computer to accomplish development, build, - releasing, document writing etc. While different version of paddle - may depends on different version of libraries and tools, if you - want to setup a local environment, you must pay attention to the - versions. The development image contains: - - - gcc/clang - - nvcc - - Python - - sphinx - - woboq - - sshd - - Many developers use servers with GPUs, they can use ssh to login to - the server and run :code:`docker exec` to enter the docker - container and start their work. Also they can start a development - docker image with SSHD service, so they can login to the container - and start work. - Train Model Using Python API ---------------------------- diff --git a/go/cmd/pserver/pserver.go b/go/cmd/pserver/pserver.go index f9cd8f87e8f2e715c87834ee08482be0f511f681..bec5775d540729000ab2dd3002600f0a92619d70 100644 --- a/go/cmd/pserver/pserver.go +++ b/go/cmd/pserver/pserver.go @@ -32,7 +32,7 @@ import ( func main() { port := flag.Int("port", 0, "port of the pserver") - index := flag.Int("index", -1, "index of this pserver, should be larger or equal than 0") + index := flag.Int("index", -1, "index of the pserver; set to -1 to automatically register the index through etcd") etcdEndpoint := flag.String("etcd-endpoint", "http://127.0.0.1:2379", "comma separated endpoint string for pserver to connect to etcd") dialTimeout := flag.Duration("dial-timeout", 5*time.Second, "dial timeout") @@ -60,12 +60,12 @@ func main() { idx, err = e.Register(*port) candy.Must(err) - cp, err = pserver.NewCheckpointFromFile(*checkpointPath, idx, e) + cp, err = pserver.LoadCheckpoint(e, idx) if err != nil { if err == pserver.ErrCheckpointNotFound { log.Infof("Could not find the pserver checkpoint.") } else { - log.Errorf("Fetch checkpoint failed, %s", err) + panic(err) } } } diff --git a/go/glide.lock b/go/glide.lock index 1f16abdf66422abcd0ab7987cab3499d02cf1b9c..be1fb24d772a6524cb798c6169c23ff03e9fed7b 100644 --- a/go/glide.lock +++ b/go/glide.lock @@ -1,5 +1,5 @@ -hash: 2a1c0eca5c07a130e3d224f9821f96cfa37a39bf6bce141c855bbc57ef569f1c -updated: 2017-07-29T07:34:48.722757905+08:00 +hash: 1b9b07408ca7fac27a374dc2ccd2433e4bff090484008a037df967284949a582 +updated: 2017-08-03T21:46:51.744995189Z imports: - name: github.com/beorn7/perks version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 @@ -145,6 +145,8 @@ imports: version: a1dba9ce8baed984a2495b658c82687f8157b98f subpackages: - xfs +- name: github.com/satori/go.uuid + version: 879c5887cd475cd7864858769793b2ceb0d44feb - name: github.com/sirupsen/logrus version: a3f95b5c423586578a4e099b11a46c2479628cac - name: github.com/topicai/candy diff --git a/go/glide.yaml b/go/glide.yaml index bc23fa6ebf2c3db61e2d63e5f7e7ddcb595dfed0..a90e71b615de92d64c79823e2a04c46001963932 100644 --- a/go/glide.yaml +++ b/go/glide.yaml @@ -14,11 +14,13 @@ import: version: ^1.0.0 - package: github.com/topicai/candy - package: golang.org/x/crypto - vcs: git repo:
https://github.com/golang/crypto.git -- package: golang.org/x/sys vcs: git +- package: golang.org/x/sys repo: https://github.com/golang/sys.git -- package: golang.org/x/text vcs: git +- package: golang.org/x/text repo: https://github.com/golang/text.git + vcs: git +- package: github.com/satori/go.uuid + version: v1.1.0 diff --git a/go/master/service.go b/go/master/service.go index d30e9a33229c0aff354417771b5bf2ae6a781715..df7c6860e6ae13a5be7d0425273812208685ee9d 100644 --- a/go/master/service.go +++ b/go/master/service.go @@ -77,11 +77,12 @@ type taskEntry struct { NumFailure int } -type taskQueues struct { +type masterState struct { Todo []taskEntry Pending map[int]taskEntry // map from task ID to task entry Done []taskEntry Failed []taskEntry + CurPass int } // Service is the master server service. @@ -94,11 +95,11 @@ type Service struct { ready chan struct{} initDone bool - mu sync.Mutex - taskQueues taskQueues - currPass int - jobTasks []taskEntry - + mu sync.Mutex + // State to be persisted to snapshot. + state masterState + // The trainer that is currently saving model. This state is + // transient, does not need to be persisted to snapshot. savingTrainer string } @@ -141,8 +142,8 @@ func NewService(store Store, chunksPerTask int, timeoutDur time.Duration, failur s.chunksPerTask = chunksPerTask s.timeoutDur = timeoutDur s.failureMax = failureMax - s.taskQueues = taskQueues{} - s.taskQueues.Pending = make(map[int]taskEntry) + s.state = masterState{} + s.state.Pending = make(map[int]taskEntry) s.ready = make(chan struct{}) s.store = store recovered, err := s.recover() @@ -180,7 +181,7 @@ func (s *Service) recover() (bool, error) { } dec := gob.NewDecoder(gr) - var tqs taskQueues + var tqs masterState err = dec.Decode(&tqs) if err != nil { return false, err @@ -193,7 +194,12 @@ func (s *Service) recover() (bool, error) { log.Errorln(err) } - s.taskQueues = tqs + s.state = tqs + log.WithFields(s.logFields()).Infof("Master recovered from snapshot, scheduling pending task timeout check.") + for _, t := range s.state.Pending { + time.AfterFunc(s.timeoutDur, s.checkTimeoutFunc(t.Task.Meta.ID, t.Task.Meta.Epoch)) + } + return true, nil } @@ -208,7 +214,7 @@ func (s *Service) snapshot() error { var buf bytes.Buffer gw := gzip.NewWriter(&buf) enc := gob.NewEncoder(gw) - err := enc.Encode(s.taskQueues) + err := enc.Encode(s.state) if err != nil { return err } @@ -290,8 +296,7 @@ func (s *Service) SetDataset(globPaths []string, _ *int) error { return err } - s.jobTasks = partition(chunks, s.chunksPerTask) - s.taskQueues.Todo = s.jobTasks + s.state.Todo = partition(chunks, s.chunksPerTask) err = s.snapshot() if err != nil { @@ -319,17 +324,17 @@ func (s *Service) processFailedTask(t taskEntry, epoch int) { } }() - delete(s.taskQueues.Pending, t.Task.Meta.ID) + delete(s.state.Pending, t.Task.Meta.ID) t.NumFailure++ if t.NumFailure > s.failureMax { log.Warningf("Task %v failed %d times, discard.", t.Task, t.NumFailure) - s.taskQueues.Failed = append(s.taskQueues.Failed, t) + s.state.Failed = append(s.state.Failed, t) return } log.Warningf("Task %v failed %d times, re-dispatch.", t.Task, t.NumFailure) - s.taskQueues.Todo = append(s.taskQueues.Todo, t) + s.state.Todo = append(s.state.Todo, t) return } @@ -338,7 +343,7 @@ func (s *Service) checkTimeoutFunc(taskID int, epoch int) func() { s.mu.Lock() defer s.mu.Unlock() - t, ok := s.taskQueues.Pending[taskID] + t, ok := s.state.Pending[taskID] if !ok { return } @@ -350,10 +355,11 @@ func (s *Service) checkTimeoutFunc(taskID int, epoch int) func() { // 
must be called with lock held. func (s *Service) logFields() log.Fields { return log.Fields{ - "todoLen": len(s.taskQueues.Todo), - "pendingLen": len(s.taskQueues.Pending), - "doneLen": len(s.taskQueues.Done), - "failedLen": len(s.taskQueues.Failed), + "todoLen": len(s.state.Todo), + "pendingLen": len(s.state.Pending), + "doneLen": len(s.state.Done), + "failedLen": len(s.state.Failed), + "curPass": s.state.CurPass, } } @@ -366,17 +372,17 @@ func (s *Service) GetTask(passID int, task *Task) error { s.mu.Lock() defer s.mu.Unlock() - if passID < s.currPass { + if passID < s.state.CurPass { return ErrPassBefore } - if passID > s.currPass { + if passID > s.state.CurPass { // Client may get run to pass after master when one client faster than the // other return ErrPassAfter } - if len(s.taskQueues.Todo) == 0 { - if len(s.taskQueues.Done) == 0 && len(s.taskQueues.Pending) == 0 { + if len(s.state.Todo) == 0 { + if len(s.state.Done) == 0 && len(s.state.Pending) == 0 { log.WithFields(s.logFields()).Warningln("All tasks failed, may start next pass") return ErrAllTaskFailed } @@ -384,10 +390,10 @@ func (s *Service) GetTask(passID int, task *Task) error { return ErrNoMoreAvailable } - t := s.taskQueues.Todo[0] + t := s.state.Todo[0] t.Task.Meta.Epoch++ - s.taskQueues.Todo = s.taskQueues.Todo[1:] - s.taskQueues.Pending[t.Task.Meta.ID] = t + s.state.Todo = s.state.Todo[1:] + s.state.Pending[t.Task.Meta.ID] = t err := s.snapshot() if err != nil { return err @@ -409,7 +415,7 @@ func (s *Service) TaskFinished(taskID int, dummy *int) error { s.mu.Lock() defer s.mu.Unlock() - t, ok := s.taskQueues.Pending[taskID] + t, ok := s.state.Pending[taskID] if !ok { log.WithFields(s.logFields()).Warningln("Pending task #%d not found.", taskID) return nil @@ -417,18 +423,18 @@ func (s *Service) TaskFinished(taskID int, dummy *int) error { // task finished, reset timeout t.NumFailure = 0 - s.taskQueues.Done = append(s.taskQueues.Done, t) - delete(s.taskQueues.Pending, taskID) + s.state.Done = append(s.state.Done, t) + delete(s.state.Pending, taskID) log.WithFields(s.logFields()).Infof("Task #%d finished.", taskID) - if len(s.taskQueues.Todo) == 0 && len(s.taskQueues.Pending) == 0 { + if len(s.state.Todo) == 0 && len(s.state.Pending) == 0 { // increase master side pass count if all tasks finished - s.currPass++ - s.taskQueues.Todo = s.jobTasks - s.taskQueues.Done = []taskEntry{} + s.state.CurPass++ + s.state.Todo = append(s.state.Done, s.state.Failed...) 
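The master/service.go hunks above fold the task queues and `currPass` into a single `masterState` that `snapshot()` gob-encodes and gzips into the store on every mutation, and `recover()` decodes again on restart. A self-contained sketch of that round trip, with a simplified state struct standing in for the real `taskEntry` fields:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/gob"
	"fmt"
	"log"
)

// Simplified stand-in for the master's persisted state.
type masterState struct {
	Todo    []string
	Done    []string
	CurPass int
}

// snapshot gob-encodes the state into a gzip stream, as Service.snapshot does.
func snapshot(s masterState) ([]byte, error) {
	var buf bytes.Buffer
	gw := gzip.NewWriter(&buf)
	if err := gob.NewEncoder(gw).Encode(s); err != nil {
		return nil, err
	}
	// Close flushes the gzip stream; without it the snapshot is truncated.
	if err := gw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// restore decodes a snapshot back into state, as Service.recover does.
func restore(b []byte) (masterState, error) {
	var s masterState
	gr, err := gzip.NewReader(bytes.NewReader(b))
	if err != nil {
		return s, err
	}
	err = gob.NewDecoder(gr).Decode(&s)
	return s, err
}

func main() {
	b, err := snapshot(masterState{Todo: []string{"task-0"}, CurPass: 3})
	if err != nil {
		log.Fatal(err)
	}
	s, err := restore(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.CurPass) // 3
}
```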
+ s.state.Done = []taskEntry{} // TODO(typhoonzero): deal with failed tasks - s.taskQueues.Failed = []taskEntry{} - log.WithFields(s.logFields()).Warningf("all task finished, add new pass data, newpass: %d.", s.currPass) + s.state.Failed = []taskEntry{} + log.WithFields(s.logFields()).Warningf("all task finished, add new pass data, newpass: %d.", s.state.CurPass) } err := s.snapshot() @@ -447,7 +453,7 @@ func (s *Service) TaskFailed(meta TaskMeta, dummy *int) error { s.mu.Lock() defer s.mu.Unlock() - t, ok := s.taskQueues.Pending[meta.ID] + t, ok := s.state.Pending[meta.ID] if !ok { log.WithFields(s.logFields()).Warningln("TaskFailed:Pending task #%v not found.", t.Task.Meta) return nil diff --git a/go/pserver/client/client_test.go b/go/pserver/client/client_test.go index b630d434dca283df67f5b850b35057870fe27529..1243ebd6836550d58144b5033e2755ae8594e948 100644 --- a/go/pserver/client/client_test.go +++ b/go/pserver/client/client_test.go @@ -59,7 +59,7 @@ func initClient() [numPserver]int { go func(l net.Listener) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) if err != nil { panic(err) } diff --git a/go/pserver/client/etcd_client.go b/go/pserver/client/etcd_client.go index b6ff1fec8a6f37f61f38cb5d004b1d2c886473ed..977ae5af37e2b7d647ae16af9c4403f916b0216d 100644 --- a/go/pserver/client/etcd_client.go +++ b/go/pserver/client/etcd_client.go @@ -103,7 +103,7 @@ func (p *EtcdClient) List() []Server { time.Sleep(p.timeout) continue } - log.Infof("got value (%s) for key: %s", psAddr, psKey) + log.Debugf("got value (%s) for key: %s", psAddr, psKey) servers[i].Index = i servers[i].Addr = psAddr } diff --git a/go/pserver/etcd_client.go b/go/pserver/etcd_client.go index 4fb26307667295ab825d07be6c3d1d4b33f6eb8b..41f0640fc09a3265c0e11c06255c7ee834983203 100644 --- a/go/pserver/etcd_client.go +++ b/go/pserver/etcd_client.go @@ -206,6 +206,7 @@ func (e *EtcdClient) GetKey(key string, timeout time.Duration) ([]byte, error) { if err != nil { return []byte{}, err } + kvs := resp.Kvs if len(kvs) == 0 { return []byte{}, nil @@ -215,9 +216,14 @@ func (e *EtcdClient) GetKey(key string, timeout time.Duration) ([]byte, error) { } // PutKey put into etcd with value by key specified -func (e *EtcdClient) PutKey(key string, value []byte, timeout time.Duration) error { +func (e *EtcdClient) PutKey(key string, value []byte, timeout time.Duration, withLease bool) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) - _, err := e.client.Put(ctx, key, string(value), clientv3.WithLease(e.sess.Lease())) + var err error + if withLease { + _, err = e.client.Put(ctx, key, string(value), clientv3.WithLease(e.sess.Lease())) + } else { + _, err = e.client.Put(ctx, key, string(value)) + } cancel() return err } diff --git a/go/pserver/optimizer.go b/go/pserver/optimizer.go index 709160d45d98b6cf6d60f52ceb3fb33e0a0bd17d..ae7359073494bd9cb6b70b12af4daca064179556 100644 --- a/go/pserver/optimizer.go +++ b/go/pserver/optimizer.go @@ -32,6 +32,7 @@ type optimizer struct { opt *C.struct_paddle_optimizer elementType ElementType contentLen int + config []byte } func cArrayToSlice(p unsafe.Pointer, len int) []byte { @@ -70,6 +71,7 @@ func newOptimizer(paramWithConfigs ParameterWithConfig, State []byte) *optimizer cstate = unsafe.Pointer(&s[0]) } + o.config = c o.opt = C.paddle_create_optimizer((*C.uchar)(&c[0]), C.int(len(c)), C.paddle_element_type(p.ElementType), cbuffer, C.int(paramBufferSize), (*C.char)(cstate), C.int(len(s))) 
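The `PutKey` change in go/pserver/etcd_client.go above adds a `withLease` switch: registration keys should vanish when the pserver's session lease expires, while checkpoint metadata must outlive the process. A minimal sketch of the two modes against the raw clientv3 API (endpoint, key names, and TTL are illustrative; the import path matches the etcd client of that era):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Leased put: the key is deleted automatically when the lease expires,
	// which is what the pserver wants for its liveness/registration keys.
	lease, err := cli.Grant(ctx, 10) // TTL in seconds
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(ctx, "/ps/0", "addr:port", clientv3.WithLease(lease.ID)); err != nil {
		log.Fatal(err)
	}

	// Plain put: persists independently of any lease, which is what the
	// checkpoint metadata needs so it survives a pserver restart.
	if _, err := cli.Put(ctx, "/checkpoints/0", `{"uuid":"..."}`); err != nil {
		log.Fatal(err)
	}
}
```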
return o diff --git a/go/pserver/service.go b/go/pserver/service.go index 7d297c46d03bf78d18ca9830a318968397119d3e..25751540a9a2dff043c14e0912bfab1aaa938ab4 100644 --- a/go/pserver/service.go +++ b/go/pserver/service.go @@ -25,11 +25,13 @@ import ( "fmt" "io/ioutil" "os" - "path/filepath" + "path" "strconv" "sync" "time" + uuid "github.com/satori/go.uuid" + log "github.com/sirupsen/logrus" ) @@ -42,9 +44,9 @@ var ErrCheckpointNotFound = errors.New("checkpoint not found") // RPC error message. const ( - AlreadyInitialized = "pserver already initialized" - Uninitialized = "pserver not fully initialized" - CheckpointMD5Failed = "checkpoint file MD5 validation failed" + AlreadyInitialized = "pserver already initialized" + Uninitialized = "pserver not fully initialized" + WrongChecksum = "checkpoint file checksum validation failed" ) // Supported element types. @@ -73,11 +75,12 @@ type ParameterWithConfig struct { // checkpointMeta saves checkpoint metadata type checkpointMeta struct { UUID string `json:"uuid"` + Path string `json:"path"` MD5 string `json:"md5"` Timestamp int64 `json:"timestamp"` } -// Checkpoint is the pserver shard persist in file +// Checkpoint is the pserver shard persist in file. type Checkpoint []parameterCheckpoint // Gradient is the gradient of the parameter. @@ -90,50 +93,58 @@ type Service struct { checkpointInterval time.Duration checkpointPath string client *EtcdClient - mu sync.Mutex - optMap map[string]*optimizer + + mu sync.Mutex + optMap map[string]*optimizer } -// parameterCheckpoint saves parameter checkpoint +// parameterCheckpoint saves parameter checkpoint. type parameterCheckpoint struct { ParameterWithConfig State []byte } -// NewCheckpointFromFile loads parameters and state from checkpoint file -func NewCheckpointFromFile(cpPath string, idx int, e *EtcdClient) (Checkpoint, error) { - v, err := e.GetKey(PsPath+string(idx), 3*time.Second) +func loadMeta(e *EtcdClient, idx int) (meta checkpointMeta, err error) { + v, err := e.GetKey(PsCheckpoint+strconv.Itoa(idx), 3*time.Second) if err != nil { - return nil, err + return } if len(v) == 0 { - return nil, ErrCheckpointNotFound + err = ErrCheckpointNotFound + return } - var cpMeta checkpointMeta - if err = json.Unmarshal(v, &cpMeta); err != nil { - return nil, err + if err = json.Unmarshal(v, &meta); err != nil { + return } - fn := filepath.Join(cpPath, cpMeta.UUID) - if _, err = os.Stat(fn); os.IsNotExist(err) { + return +} + +// LoadCheckpoint loads checkpoint from file. +func LoadCheckpoint(e *EtcdClient, idx int) (Checkpoint, error) { + cpMeta, err := loadMeta(e, idx) + if err != nil { return nil, err } - content, err := ioutil.ReadFile(fn) + + content, err := ioutil.ReadFile(cpMeta.Path) if err != nil { return nil, err } + // TODO(helin): change MD5 to CRC since CRC is better for file + // checksum in our use case (emphasize speed over security). 
h := md5.New() md5 := hex.EncodeToString(h.Sum(content)) if md5 != cpMeta.MD5 { - return nil, errors.New(CheckpointMD5Failed) + return nil, errors.New(WrongChecksum) } dec := gob.NewDecoder(bytes.NewReader(content)) - cp := Checkpoint{} - if err = dec.Decode(cp); err != nil { + var cp Checkpoint + if err = dec.Decode(&cp); err != nil { return nil, err } return cp, nil @@ -193,6 +204,15 @@ func (s *Service) FinishInitParams(_ int, _ *int) error { } close(s.initialized) + go func() { + t := time.Tick(s.checkpointInterval) + for range t { + err := s.checkpoint() + if err != nil { + log.Errorln(err) + } + } + }() return nil } @@ -240,23 +260,36 @@ func (s *Service) GetParam(name string, parameter *Parameter) error { return nil } -// pserver save checkpoint -func (s *Service) doCheckpoint() (err error) { - <-s.initialized - s.mu.Lock() - defer s.mu.Unlock() +func traceTime(start time.Time, name string) { + elapsed := time.Since(start) + log.Infof("%s took %v", name, elapsed) +} + +// checkpoint saves checkpoint to disk. +// +// checkpoint should only be called after the parameters are +// initialized. +func (s *Service) checkpoint() (err error) { + log.Infoln("Begin save checkpoint.") + defer traceTime(time.Now(), "save checkpoint") + s.mu.Lock() cp := make([]parameterCheckpoint, len(s.optMap)) index := 0 + // TODO(helin): write checkpoint incrementally to reduce memory + // footprint during checkpoint. for name, opt := range s.optMap { var pc parameterCheckpoint pc.Param.Name = name pc.Param.ElementType = opt.elementType pc.Param.Content = opt.GetWeights() + pc.Config = opt.config pc.State = opt.GetStates() cp[index] = pc index++ } + s.mu.Unlock() + var buf bytes.Buffer encoder := gob.NewEncoder(&buf) err = encoder.Encode(cp) @@ -264,32 +297,9 @@ func (s *Service) checkpoint() (err error) { return } - cpMeta := checkpointMeta{} - cpMeta.UUID = s.checkpointPath + strconv.Itoa(s.idx) - cpMeta.Timestamp = time.Now().UnixNano() - h := md5.New() - cpMeta.MD5 = hex.EncodeToString(h.Sum(buf.Bytes())) - - cpMetajson, err := json.Marshal(cpMeta) - if err != nil { - return - } - - err = s.client.PutKey(filepath.Join(PsCheckpoint, strconv.Itoa(s.idx)), cpMetajson, 3*time.Second) - if err != nil { - return - } - if _, err = os.Stat(cpMeta.UUID); os.IsNotExist(err) { - log.Info("checkpoint does not exists.") - } else { - err = os.Remove(cpMeta.UUID) - if err != nil { - log.Infof("Removing checkpoint %s failed", cpMeta.UUID) - } else { - log.Infof("checkpoint %s already exsits, removing ", cpMeta.UUID) - } - } - f, err := os.Create(cpMeta.UUID) + id := uuid.NewV4().String() + p := path.Join(s.checkpointPath, id) + f, err := os.Create(p) if err != nil { return } @@ -317,5 +327,43 @@ return } + oldMeta, err := loadMeta(s.client, s.idx) + if err == ErrCheckpointNotFound { + log.Infoln("Do not have existing checkpoint.") + err = nil + } + + if err != nil { + return + } + + h := md5.New() + md5 := hex.EncodeToString(h.Sum(buf.Bytes())) + cpMeta := checkpointMeta{ + UUID: id, + Timestamp: time.Now().UnixNano(), + MD5: md5, + Path: p, + } + + json, err := json.Marshal(cpMeta) + if err != nil { + return + } + + err = s.client.PutKey(PsCheckpoint+strconv.Itoa(s.idx), json, 3*time.Second, false) + if err != nil { + return + } + + if oldMeta.Path != "" { + rmErr := os.Remove(oldMeta.Path) + if rmErr != nil { + // log error, but still treat checkpoint as + // successful. + log.Errorln(rmErr) + } + } + return }
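The rewritten checkpoint flow above writes the gob blob to a fresh UUID-named file, publishes `{uuid, path, md5, timestamp}` to etcd without a lease, and only afterwards removes the previous file, so a crash mid-checkpoint still leaves a loadable checkpoint behind. A compressed sketch of the write-then-publish-then-verify cycle, with a plain map standing in for etcd; note it hashes the blob with the conventional `md5.Sum` rather than the patch's `h.Sum(content)` idiom, and the helper names are illustrative:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path"
)

type checkpointMeta struct {
	UUID      string `json:"uuid"`
	Path      string `json:"path"`
	MD5       string `json:"md5"`
	Timestamp int64  `json:"timestamp"`
}

// etcd stand-in: index key -> serialized metadata.
var store = map[string][]byte{}

func saveCheckpoint(dir, id string, blob []byte, ts int64) error {
	p := path.Join(dir, id)
	if err := ioutil.WriteFile(p, blob, 0644); err != nil {
		return err
	}
	sum := md5.Sum(blob)
	meta := checkpointMeta{UUID: id, Path: p, MD5: hex.EncodeToString(sum[:]), Timestamp: ts}
	b, err := json.Marshal(meta)
	if err != nil {
		return err
	}
	// Publish the metadata pointer last, so a crash before this line
	// leaves the previous (still valid) checkpoint in place.
	store["/checkpoints/0"] = b
	return nil
}

func loadCheckpoint() ([]byte, error) {
	var meta checkpointMeta
	if err := json.Unmarshal(store["/checkpoints/0"], &meta); err != nil {
		return nil, err
	}
	blob, err := ioutil.ReadFile(meta.Path)
	if err != nil {
		return nil, err
	}
	sum := md5.Sum(blob)
	if hex.EncodeToString(sum[:]) != meta.MD5 {
		return nil, fmt.Errorf("checkpoint file checksum validation failed")
	}
	return blob, nil
}

func main() {
	dir, _ := ioutil.TempDir("", "cp")
	defer os.RemoveAll(dir)
	if err := saveCheckpoint(dir, "uuid-1", []byte("params"), 1); err != nil {
		log.Fatal(err)
	}
	blob, err := loadCheckpoint()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(blob)) // params
}
```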
diff --git a/go/pserver/service_test.go b/go/pserver/service_test.go index 988f3b5acb82a95aeb54af2b8b0e4d39a458291a..be648cd1e83e4f7790edac5842db432fb4870072 100644 --- a/go/pserver/service_test.go +++ b/go/pserver/service_test.go @@ -30,7 +30,7 @@ const ( func TestServiceFull(t *testing.T) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) if err != nil { t.Error(err) } @@ -102,7 +102,7 @@ func TestServiceFull(t *testing.T) { func TestMultipleInit(t *testing.T) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) if err != nil { t.Fatal(err) } @@ -119,7 +119,7 @@ func TestMultipleInit(t *testing.T) { func TestUninitialized(t *testing.T) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) err = s.SendGrad(pserver.Gradient{}, nil) if err.Error() != pserver.Uninitialized { t.Fatal(err) } } @@ -128,7 +128,7 @@ func TestBlockUntilInitialized(t *testing.T) { var cp pserver.Checkpoint - s, err := pserver.NewService(0, 1, "", nil, cp) + s, err := pserver.NewService(0, time.Hour, "", nil, cp) if err != nil { t.Error(err) } diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index f8a88cf317aee6c5dd25e4cc25d588c6c50fcbce..cf61a243e9df2fd4a580e41f07cb0a22dcc72083 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -22,7 +22,5 @@ if(WITH_C_API) endif() if(WITH_SWIG_PY) - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in - ${CMAKE_CURRENT_SOURCE_DIR}/setup.py) add_subdirectory(api) endif() diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 84da89a1422b6095b995744cebb6a3af98a071c6..7a1e8b8b26ac6330c3799b7dfeb4447e171fe0f1 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -82,9 +82,7 @@ SWIG_LINK_LIBRARIES(swig_paddle add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PROJ_ROOT}/paddle/py_paddle - COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel - COMMAND ${CMAKE_COMMAND} -E touch dist/.timestamp - COMMAND rm -rf py_paddle.egg-info build + COMMAND ${CMAKE_COMMAND} -E touch .timestamp WORKING_DIRECTORY ${PROJ_ROOT}/paddle DEPENDS _swig_paddle ) @@ -92,10 +90,6 @@ add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so # TODO(yuyang18) : make wheel name calculated by cmake add_custom_target(python_api_wheel ALL DEPENDS ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so) -install(DIRECTORY ${CMAKE_SOURCE_DIR}/paddle/dist/ - DESTINATION opt/paddle/share/wheels -) - if(WITH_TESTING) IF(NOT PY_PIP_FOUND) SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip) @@ -108,7 +102,7 @@ if(WITH_TESTING) BUILD_COMMAND "" INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install BUILD_IN_SOURCE 1 - DEPENDS python setuptools python_api_wheel + #DEPENDS python setuptools python_api_wheel ) ENDIF() add_subdirectory(test) diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt index 73ffa690d9d91b673079fc0ecf91f17cbabfdb1e..0865b02c4f275f3d5069109917b05dff1393fc1e 100755 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/cuda/CMakeLists.txt @@ -39,6 +39,7 @@ set(CUDA_CU_SOURCES src/hl_cuda_lstm.cu src/hl_top_k.cu src/hl_batch_transpose.cu
+ src/hl_batch_norm.cu src/hl_cuda_sequence.cu src/hl_table_apply.cu) diff --git a/paddle/cuda/include/hl_batch_norm.h b/paddle/cuda/include/hl_batch_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..afc5e0b2deacc4aadf98b3f7ce115e534bbc5124 --- /dev/null +++ b/paddle/cuda/include/hl_batch_norm.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifndef HL_BATCH_NORM_H_ +#define HL_BATCH_NORM_H_ + +#include "hl_base.h" + +/** + * @brief batch norm inference. + * + * @param[in] input input data. + * @param[out] output output data. + * @param[in] scale batch normalization scale parameter (in original + * paper scale is referred to as gamma). + * @param[in] bias batch normalization bias parameter (in original + * paper bias is referred to as beta). + * @param[in] estimatedMean + * @param[in] estimatedVar The moving mean and variance + * accumulated during the training phase are passed + * as inputs here. + * @param[in] epsilon Epsilon value used in the batch + * normalization formula. + */ +extern void hl_batch_norm_cuda_inference(const real* input, + real* output, + const real* scale, + const real* bias, + const real* estimatedMean, + const real* estimatedVar, + const double epsilon, + size_t batchSize, + size_t channel, + size_t height, + size_t width); + +#endif // HL_BATCH_NORM_H_
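`hl_batch_norm_cuda_inference`, declared above, applies the standard inference-time transform y = scale·(x − mean)/√(var + ε) + bias per channel of NCHW input. The same arithmetic in a few lines of Go, as a plain-CPU reference for what the CUDA kernel in the next file computes (not part of the patch):

```go
package main

import (
	"fmt"
	"math"
)

// batchNormInference applies y = scale*(x-mean)/sqrt(var+eps) + bias
// over input laid out as NCHW, mirroring the CUDA kernel's indexing.
func batchNormInference(in, scale, bias, mean, variance []float32,
	eps float64, n, c, h, w int) []float32 {
	out := make([]float32, len(in))
	hw := h * w
	for b := 0; b < n; b++ {
		for ch := 0; ch < c; ch++ {
			inv := float32(1 / math.Sqrt(float64(variance[ch])+eps))
			for i := 0; i < hw; i++ {
				id := (b*c+ch)*hw + i
				out[id] = scale[ch]*(in[id]-mean[ch])*inv + bias[ch]
			}
		}
	}
	return out
}

func main() {
	in := []float32{1, 2, 3, 4} // one sample, one channel, 2x2
	out := batchNormInference(in,
		[]float32{1}, []float32{0}, []float32{2.5}, []float32{1.25},
		1e-5, 1, 1, 2, 2)
	fmt.Println(out)
}
```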
diff --git a/paddle/cuda/src/hl_batch_norm.cu b/paddle/cuda/src/hl_batch_norm.cu new file mode 100644 index 0000000000000000000000000000000000000000..5828ecb8e049c2f0573ab8547164794bef6db1ca --- /dev/null +++ b/paddle/cuda/src/hl_batch_norm.cu @@ -0,0 +1,66 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "hl_batch_norm.h" + +__global__ void batchNormInference(real* output, + const real* input, + const real* scale, + const real* bias, + const real* estimatedMean, + const real* estimatedVar, + const double epsilon, + size_t batchSize, + size_t channel, + size_t height, + size_t width) { + const int tid = threadIdx.x; + const int num = channel * height * width; + const int batch = blockIdx.x; + for (int i = tid; i < num; i += blockDim.x) { + const int c = i / (height * width); + const int id = batch * num + i; + real val = input[id] - estimatedMean[c]; + val /= sqrt(estimatedVar[c] + epsilon); + val *= scale[c]; + val += bias[c]; + output[id] = val; + } +} + +void hl_batch_norm_cuda_inference(const real* input, + real* output, + const real* scale, + const real* bias, + const real* estimatedMean, + const real* estimatedVar, + const double epsilon, + size_t batchSize, + size_t channel, + size_t height, + size_t width) { + batchNormInference<<<batchSize, 256, 0, STREAM_DEFAULT>>>(output, + input, + scale, + bias, + estimatedMean, + estimatedVar, + epsilon, + batchSize, + channel, + height, + width); + + CHECK_SYNC("hl_batch_norm_cuda_inference failed!"); +} diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc index 7ad8a39768a064140a08c912a5a467bc24a12adf..78642a17443b0b4d81defaa46579332ef20c71a1 100644 --- a/paddle/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/cuda/src/hl_cuda_cudnn.cc @@ -1023,14 +1023,6 @@ void hl_batch_norm_forward_inference(hl_tensor_descriptor inputDesc, real beta = 1.0f; cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL; - int batch_size = ((cudnn_tensor_descriptor)inputDesc)->batch_size; - if (batch_size > 1024 && g_cudnn_lib_version < 6000) { - LOG(INFO) << " To process current batch data with size " << batch_size - << " (>1024), cudnnBatchNorm requires cuDNN version >= 6000."
- << " If there is an error complaining CUDNN_STATUS_NOT_SUPPORTED," - << " just recompile PaddlePaddle with cuDNN >= 6000, replacing" - << " current version " << g_cudnn_lib_version; - } CHECK_CUDNN( dynload::cudnnBatchNormalizationForwardInference(t_resource.cudnn_handle, mode, diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 1db042c6fc8b6c4ea7c3854ea4b1cd016deeb0b6..f6ad5b2e4258553fc1a4eeb869b9d4d02cae9e26 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -35,6 +35,8 @@ add_dependencies(framework_py_proto framework_py_proto_init) cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward) + +if(WITH_PYTHON) cc_library(paddle_pybind SHARED SRCS pybind.cc DEPS pybind python backward @@ -43,4 +45,6 @@ cc_library(paddle_pybind SHARED add_op mean_op cross_entropy_op + fill_zeros_like_op recurrent_op) +endif(WITH_PYTHON) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 6c26183818a9d6996e3d3ce2af74ba36f4711eca..b2813da83d9e4c525e66bb1f79b28769627eaec2 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -260,6 +260,12 @@ class OpRegistry { return CreateOp(op_desc.type(), inputs, outputs, attrs); } + static bool SupportGPU(const std::string& op_type) { + OperatorWithKernel::OpKernelKey key; + key.place_ = platform::GPUPlace(); + return OperatorWithKernel::AllOpKernels().at(op_type).count(key) != 0; + } + static std::shared_ptr CreateGradOp(const OperatorBase& op) { PADDLE_ENFORCE(!op.IsNetOp(), "Use framework::Backward to get backward ops"); diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index beb6793289812cfaa6991d28379126ff29fa2547..d9a013b883abdec4422806f90e36da7410a4fa0c 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -34,8 +34,8 @@ ExecutionContext::GetEigenDevice() const { #endif const std::string& OperatorBase::Input(const std::string& name) const { - PADDLE_ENFORCE(in_out_idxs_ != nullptr, - "Input Output Indices could not be nullptr"); + PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, + "Input Output Indices could not be nullptr"); auto it = in_out_idxs_->find(name); PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_", name); @@ -49,7 +49,7 @@ const std::string& OperatorBase::Input(const std::string& name) const { } std::vector OperatorBase::Inputs(const std::string& name) const { - PADDLE_ENFORCE(in_out_idxs_ != nullptr, "IO Idx could not be nullptr"); + PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "IO Idx could not be nullptr"); auto input_format = GetAttr>("input_format"); auto offset = in_out_idxs_->at(name); PADDLE_ENFORCE(input_format.at(static_cast(offset) + 1) <= @@ -62,7 +62,7 @@ std::vector OperatorBase::Inputs(const std::string& name) const { } const std::string& OperatorBase::Output(const std::string& name) const { - PADDLE_ENFORCE(in_out_idxs_ != nullptr, "InOut Indice could not be nullptr"); + PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "InOut Indice could not be nullptr"); auto it = in_out_idxs_->find(name); PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_", name); @@ -76,7 +76,7 @@ const std::string& OperatorBase::Output(const std::string& name) const { } std::vector OperatorBase::Outputs(const std::string& name) const { - PADDLE_ENFORCE(in_out_idxs_ != nullptr, "InOut Indice could not be nullptr"); + PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "InOut Indice could not be nullptr"); auto output_format = 
GetAttr>("output_format"); auto offset = in_out_idxs_->at(name); PADDLE_ENFORCE(output_format.at(static_cast(offset) + 1) <= diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 9672492d1c2c6a82c37e0a840a4ca9c111de06d8..03fabff79b637299f8e133aab29ccb0e145379cf 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -167,15 +167,15 @@ class OperatorContext { template const T* Input(const size_t index) const { auto var = InputVar(index); - PADDLE_ENFORCE(var != nullptr, "Input(%d) should not be nullptr", index); + PADDLE_ENFORCE_NOT_NULL(var, "Input(%d) should not be nullptr", index); return &var->Get(); } template T* Output(const size_t index) const { auto var = OutputVar(index); - PADDLE_ENFORCE( - var != nullptr, + PADDLE_ENFORCE_NOT_NULL( + var, "Output(%d) not be nullptr, which means variable [%s] does not " "exist in scope", index, op_.outputs_[index]); @@ -185,14 +185,14 @@ class OperatorContext { template const T* Input(const std::string& name) const { auto var = InputVar(name); - PADDLE_ENFORCE(var != nullptr, "Input(%s) should not be nullptr", name); + PADDLE_ENFORCE_NOT_NULL(var, "Input(%s) should not be nullptr", name); return &var->Get(); } template T* Output(const std::string& name) const { auto var = OutputVar(name); - PADDLE_ENFORCE(var != nullptr, "Output(%s) should not be nullptr", name); + PADDLE_ENFORCE_NOT_NULL(var, "Output(%s) should not be nullptr", name); return var->GetMutable(); } @@ -204,9 +204,9 @@ class OperatorContext { std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { auto var = scope_.FindVar(sub_name); - PADDLE_ENFORCE(var != nullptr, - "MultiInput(%s:%s) should not be nullptr", - name, sub_name); + PADDLE_ENFORCE_NOT_NULL( + var, "MultiInput(%s:%s) should not be nullptr", name, + sub_name); return &var->Get(); }); return res; @@ -220,9 +220,9 @@ class OperatorContext { std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { auto var = scope_.FindVar(sub_name); - PADDLE_ENFORCE(var != nullptr, - "MultiOutput(%s:%s) should not be nullptr", - name, sub_name); + PADDLE_ENFORCE_NOT_NULL( + var, "MultiOutput(%s:%s) should not be nullptr", name, + sub_name); return var->GetMutable(); }); return res; diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc index cbb86c4195a6c7e976fc5e0dd69d77be46dfb17c..9ee2c6af86476ea50def237ed011fcddaa41daad 100644 --- a/paddle/framework/pybind.cc +++ b/paddle/framework/pybind.cc @@ -32,7 +32,7 @@ limitations under the License. */ namespace py = pybind11; USE_OP(add_two); -USE_OP(onehot_cross_entropy); +USE_OP_CPU(onehot_cross_entropy); USE_OP_WITHOUT_KERNEL(fc); USE_OP(sgd); USE_OP(mul); @@ -40,6 +40,7 @@ USE_OP(mean); USE_OP(sigmoid); USE_OP(softmax); USE_OP(rowwise_add); +USE_OP(fill_zeros_like); USE_OP_WITHOUT_KERNEL(recurrent_op); namespace paddle { namespace framework { @@ -200,6 +201,8 @@ All parameter, weight, gradient are variables in Paddle. 
return OpRegistry::CreateOp(desc); }); + operator_base.def_static("support_gpu", &OpRegistry::SupportGPU); + operator_base.def("backward", [](const OperatorBase &forwardOp, const std::unordered_set<std::string> &no_grad_vars) { diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 4c3b14b83d841e88683a13634c93f51c012128b6..c44df05e4b0fceed858fbf4f68eddc407a44c894 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -127,8 +127,8 @@ class Tensor { memory::PODDeleter(place)), place_(place), size_(size) { - PADDLE_ENFORCE(ptr_ != nullptr, "Insufficient %s memory to allocation.", - is_cpu_place(place_) ? "CPU" : "GPU"); + PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory for allocation.", + (is_cpu_place(place_) ? "CPU" : "GPU")); } virtual size_t size() const { return size_; } diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 92621f8c18ec0d03160a23c462830d14272c7f64..8d9bec6dc9c3f0af822a0d8cd8588dc932970652 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -14,17 +14,18 @@ limitations under the License. */ #pragma once #include "paddle/memory/memcpy.h" +#include "paddle/platform/enforce.h" namespace paddle { namespace framework { template <typename T> inline void Tensor::check_memory_size() const { - PADDLE_ENFORCE(holder_ != nullptr, - "Tenosr holds no memory. Call Tensor::mutable_data first."); - PADDLE_ENFORCE(holder_->size() >= product(dims_) * sizeof(T) + offset_, - "Tensor's dims_ is out of bound. Call Tensor::mutable_data " - "first to re-allocate memory."); + PADDLE_ENFORCE_NOT_NULL( + holder_, "Tensor holds no memory. Call Tensor::mutable_data first."); + PADDLE_ENFORCE_GE(holder_->size(), product(dims_) * sizeof(T) + offset_, + "Tensor's dims_ is out of bound. Call Tensor::mutable_data " + "first to re-allocate memory."); } template <typename T> @@ -51,9 +52,9 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template <typename T> inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod<T>::value, "T must be POD"); - PADDLE_ENFORCE(product(dims_) > 0, - "Tensor's numel must be larger than zero to call " - "Tensor::mutable_data. Call Tensor::set_dim first."); + PADDLE_ENFORCE_GT(product(dims_), 0, + "Tensor's numel must be larger than zero to call " + "Tensor::mutable_data.
Call Tensor::set_dim first."); /* some versions of boost::variant don't have operator!= */ size_t size = product(dims_) * sizeof(T); if (holder_ == nullptr || !(holder_->place() == place) || @@ -120,11 +121,11 @@ inline void Tensor::CopyFrom(const Tensor& src, template <typename T> inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { check_memory_size(); - PADDLE_ENFORCE(begin_idx >= 0, "Slice begin index is less than zero."); - PADDLE_ENFORCE(end_idx <= dims_[0], "Slice end index is out of bound."); - PADDLE_ENFORCE(begin_idx < end_idx, - "Begin index must be less than end index."); - PADDLE_ENFORCE(dims_[0] != 1, "Can not slice a tensor with dims_[0] = 1."); + PADDLE_ENFORCE_GE(begin_idx, 0, "Slice begin index is less than zero."); + PADDLE_ENFORCE_LE(end_idx, dims_[0], "Slice end index is out of bound."); + PADDLE_ENFORCE_LT(begin_idx, end_idx, + "Begin index must be less than end index."); + PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); int base = product(dims_) / dims_[0]; Tensor dst; dst.holder_ = holder_; diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index ef1cc10b840896d9ab97f963fc12a4971cd74e1f..20276181b974bb5b3d6cb40fb5e6c1295cf1c02f 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -36,7 +36,8 @@ TEST(Tensor, DataAssert) { } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = - "Tenosr holds no memory. Call Tensor::mutable_data first."; + "holder_ should not be null\nTensor holds no memory. Call " + "Tensor::mutable_data first."; const char* what = err.what(); for (size_t i = 0; i < msg.length(); ++i) { ASSERT_EQ(what[i], msg[i]); @@ -111,7 +112,8 @@ TEST(Tensor, ShareDataWith) { } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = - "Tenosr holds no memory. Call Tensor::mutable_data first."; + "holder_ should not be null\nTensor holds no memory. Call " + "Tensor::mutable_data first."; const char* what = err.what(); for (size_t i = 0; i < msg.length(); ++i) { ASSERT_EQ(what[i], msg[i]); diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/gserver/layers/CudnnBatchNormLayer.cpp index 09dac05a7ad7a80bd6b9e12e8f7f060310d516c8..44ba2c4b7d1562d2ce839b5f4b4de1af35e6925f 100644 --- a/paddle/gserver/layers/CudnnBatchNormLayer.cpp +++ b/paddle/gserver/layers/CudnnBatchNormLayer.cpp @@ -14,6 +14,7 @@ limitations under the License. */ #include "CudnnBatchNormLayer.h" #include "Layer.h" +#include "paddle/cuda/include/hl_batch_norm.h" #include "paddle/utils/Stat.h" namespace paddle { @@ -79,16 +80,33 @@ void CudnnBatchNormLayer::forward(PassType passType) { savedInvVar); } else { // used movingMean and movingVar in testing - hl_batch_norm_forward_inference(ioDesc_, - input, - ioDesc_, - output, - bnParamDesc_, - gamma, - beta, - movingMean, - movingVar, - EPS); + if (batchSize <= 1024) { + hl_batch_norm_forward_inference(ioDesc_, + input, + ioDesc_, + output, + bnParamDesc_, + gamma, + beta, + movingMean, + movingVar, + EPS); + } else { + // There is a limitation in the cuDNN library. + // When the batch size is larger than 1024 in cuDNN v5.1, + // the cudnnBatchNormalizationForwardInference will fail.
+ hl_batch_norm_cuda_inference(input, + output, + gamma, + beta, + movingMean, + movingVar, + EPS, + batchSize, + channels_, + imageH_, + imageW_); + } } /* activation */ { diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 83fcfed46cd568d22237eeef9c0215e4e3ad2666..659eefa31bdb1f2433d03a59d5bf4782c71bdecf 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -21,6 +21,8 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" +#include "paddle/cuda/include/hl_batch_norm.h" +#include "paddle/math/tests/TensorCheck.h" #include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT @@ -117,6 +119,74 @@ TEST(Layer, batchNorm) { CHECK_EQ(static_cast(convLayer->getOutputValue()->getWidth()), 576); } +#ifndef PADDLE_ONLY_CPU +void batchNormInference(int n, int c, int h, int w) { + MatrixPtr input = std::make_shared(n, c * h * w); + MatrixPtr cudnnOut = std::make_shared(n, c * h * w); + MatrixPtr cudaOut = std::make_shared(n, c * h * w); + MatrixPtr cudnnCheck = std::make_shared(n, c * h * w); + MatrixPtr cudaCheck = std::make_shared(n, c * h * w); + input->randomizeUniform(); + cudnnOut->zeroMem(); + cudaOut->zeroMem(); + + MatrixPtr scale = std::make_shared(1, c); + scale->randomizeUniform(); + MatrixPtr bias = std::make_shared(1, c); + bias->randomizeUniform(); + + MatrixPtr movingMean = std::make_shared(1, c); + movingMean->randomizeUniform(); + + MatrixPtr movingVar = std::make_shared(1, c); + movingVar->randomizeUniform(); + movingVar->clip(0.01, 50); + + hl_tensor_descriptor ioDesc; + hl_tensor_descriptor bnDesc; + hl_create_tensor_descriptor(&ioDesc); + hl_create_tensor_descriptor(&bnDesc); + hl_tensor_reshape(ioDesc, n, c, h, w); + hl_tensor_reshape(bnDesc, 1, c, 1, 1); + + double EPS = 1E-5; + hl_batch_norm_forward_inference(ioDesc, + input->getData(), + ioDesc, + cudnnOut->getData(), + bnDesc, + scale->getData(), + bias->getData(), + movingMean->getData(), + movingVar->getData(), + EPS); + + hl_batch_norm_cuda_inference(input->getData(), + cudaOut->getData(), + scale->getData(), + bias->getData(), + movingMean->getData(), + movingVar->getData(), + EPS, + n, + c, + h, + w); + + cudnnCheck->copyFrom(*cudnnOut); + cudaCheck->copyFrom(*cudaOut); + autotest::TensorCheckErr(*cudnnCheck, *cudaCheck); + + hl_destroy_tensor_descriptor(ioDesc); + hl_destroy_tensor_descriptor(bnDesc); +} + +TEST(BatchNorm, Inference) { + batchNormInference(33, 267, 1, 1); + batchNormInference(19, 105, 4, 4); +} +#endif + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc index d4c05ed483ca56a31dd8ee4d81b54551ae6da0d1..fb85093bb2f4ef7950bd3bab3d0b7b9348763448 100644 --- a/paddle/operators/add_op.cc +++ b/paddle/operators/add_op.cc @@ -22,8 +22,7 @@ class AddOp : public OperatorWithKernel { void InferShape(const InferShapeContext &ctx) const override { PADDLE_ENFORCE_EQ(ctx.InputSize(), 2); PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr, - "Inputs of AddOp must all be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "Inputs of AddOp must all be set"); PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, "Outputs of AddOp must all be set"); PADDLE_ENFORCE(ctx.Input(0)->dims() == ctx.Input(1)->dims(), diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 
b0e1b8e41a5320aa14e316a56dbfd01e43c6816b..942b919079bf06caeb6d185efb31d9d28d193008 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -20,18 +20,19 @@ namespace operators { class OnehotCrossEntropyOp : public OperatorWithKernel { protected: void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 2, - "Input size of OnehotCrossEntropyOp must be two"); - PADDLE_ENFORCE(ctx.OutputSize() == 1, - "Output size of OnehotCrossEntropyOp must be one"); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr, - "Inputs of OnehotCrossEntropyOp must all be set"); - PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, - "Outputs of OnehotCrossEntropyOp must all be set"); - PADDLE_ENFORCE(ctx.Input(0)->dims().size() == 2, - "X's dimension must be 2."); - PADDLE_ENFORCE(ctx.Output(0)->dims().size() == 1, - "label's dimension must be 1."); + PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, + "Input size of OnehotCrossEntropyOp must be two"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, + "Output size of OnehotCrossEntropyOp must be one"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), + "0-th input of OnehotCrossEntropyOp should be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(1), + "1-th input of OnehotCrossEntropyOp should be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), + "Outputs of OnehotCrossEntropyOp must all be set"); + PADDLE_ENFORCE_EQ(ctx.Input(0)->dims().size(), 2); + PADDLE_ENFORCE_EQ(ctx.Output(0)->dims().size(), 1, + "label's dimension must be 1."); ctx.Output(0)->Resize({ctx.Input(0)->dims()[0]}); } }; diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 2f453f8379ca7ce0612fed757719acb2d2cf0ad8..ec73721a810fa86d65409f643401eb77248ad5de 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -14,6 +14,3 @@ #define EIGEN_USE_GPU #include "paddle/operators/cross_entropy_op.h" - -REGISTER_OP_GPU_KERNEL(onehot_cross_entropy, - ops::OnehotCrossEntropyOpKernel); diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h index 88d06e13469f8e6fc9e634d804c1fe0bed5e2d75..e02e3e2945af13fe283f95f7faa03b2a76d06125 100644 --- a/paddle/operators/cross_entropy_op.h +++ b/paddle/operators/cross_entropy_op.h @@ -18,7 +18,24 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -static const float kCrossEntropyLogThreshold{1e-20}; +template <typename T> +T tolerable_value(T x) { + static_assert(std::is_floating_point<T>::value, + "tolerable_value works only on float, " + "double and long double."); + + const T kApproInf = 1e20; + + if (x == INFINITY) { + return kApproInf; + } + + if (x == -INFINITY) { + return -kApproInf; + } + + return x; +} template <typename T> class OnehotCrossEntropyOpKernel : public OpKernel { @@ -36,10 +53,9 @@ class OnehotCrossEntropyOpKernel : public OpKernel { int batch_size = X->dims()[0]; int class_num = X->dims()[1]; - // Y[i] = -log(X[i][j]) for (int i = 0; i < batch_size; ++i) { - Ydata[i] = -std::log(std::max(Xdata[i * class_num + label_data[i]], - kCrossEntropyLogThreshold)); + int index = i * class_num + label_data[i]; + Ydata[i] = -tolerable_value(std::log(Xdata[index])); } } }; @@ -62,9 +78,8 @@ class OnehotCrossEntropyGradientOpKernel : public OpKernel { const int class_num = X->dims()[1]; for (int i = 0; i < batch_size; ++i) { - dXdata[i * class_num + label_data[i]] = - -dYdata[i] / std::max(Xdata[i * class_num + label_data[i]], - kCrossEntropyLogThreshold); + int index = i * class_num + label_data[i]; + dXdata[index] = -tolerable_value(dYdata[i] / Xdata[index]); } } };
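`tolerable_value` above changes the overflow handling of the cross-entropy kernels: instead of flooring probabilities at 1e-20 before the log, they now take the log (or division) first and clamp an infinite result to ±1e20. An equivalent sketch in Go (not part of the patch):

```go
package main

import (
	"fmt"
	"math"
)

// tolerableValue mirrors the C++ helper: pass finite values through,
// and clamp +/-Inf (e.g. from log(0)) to +/-1e20.
func tolerableValue(x float64) float64 {
	const kApproInf = 1e20
	if math.IsInf(x, 1) {
		return kApproInf
	}
	if math.IsInf(x, -1) {
		return -kApproInf
	}
	return x
}

func main() {
	// Cross-entropy loss for a zero probability stays finite:
	// log(0) = -Inf, clamped to -1e20, so the loss becomes 1e20.
	fmt.Println(-tolerableValue(math.Log(0))) // 1e+20
	fmt.Println(-tolerableValue(math.Log(0.5)))
}
```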
diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index 3d37d64c5a8c288684122f3e686262399d32ed7b..6dcc9372b2ee25c7e653282e7763e97d56be6262 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -13,8 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/fill_zeros_like_op.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/tensor.h" namespace paddle { namespace operators { @@ -22,14 +20,14 @@ namespace operators { class FillZerosLikeOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 1UL, - "Input size of FillZerosLikeOp must be one."); - PADDLE_ENFORCE(ctx.OutputSize() == 1UL, - "Output size of AddOp must be one."); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr, - "Input of FillZerosLikeOp must be set."); - PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, - "Output of FillZerosLikeOp must be set."); + PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL, + "Input size of FillZerosLikeOp must be one."); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL, + "Output size of FillZerosLikeOp must be one."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), + "Input of FillZerosLikeOp must be set."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), + "Output of FillZerosLikeOp must be set."); ctx.Output<framework::Tensor>(0)->Resize( ctx.Input<framework::Tensor>(0)->dims()); } diff --git a/paddle/operators/fill_zeros_like_op.cu b/paddle/operators/fill_zeros_like_op.cu index ed1068219c8fee8c6e8809f450a9d38c8226f317..4f1054cf47e35572dbbc51ca742994065a027919 100644 --- a/paddle/operators/fill_zeros_like_op.cu +++ b/paddle/operators/fill_zeros_like_op.cu @@ -12,6 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +#define EIGEN_USE_GPU #include "paddle/framework/op_registry.h" #include "paddle/operators/fill_zeros_like_op.h" diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/operators/fill_zeros_like_op.h index 4bff1fbfc15af1f4d1ce9c99fe48b0b0f11b5b3f..dfaed2c9aaf2bf5c1a9b803fc9c8b9ea0e5c5d4e 100644 --- a/paddle/operators/fill_zeros_like_op.h +++ b/paddle/operators/fill_zeros_like_op.h @@ -13,9 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "glog/logging.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/operator.h" +#include "paddle/operators/type_alias.h" namespace paddle { namespace operators { @@ -26,7 +24,8 @@ class FillZerosLikeKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { auto* output = context.Output<framework::Tensor>(0); output->mutable_data<T>(context.GetPlace()); - framework::EigenVector<T>::Flatten(*output).setZero(); + auto t = framework::EigenVector<T>::Flatten(*output); + t.device(context.GetEigenDevice<Place>()) = t.constant(T(0)); } };
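The kernel change above goes hand in hand with the `EIGEN_USE_GPU` define added to the `.cu` file: `setZero()` always runs on the host, while assigning through `.device(...)` lets the execution context decide where the expression is evaluated. A standalone sketch of the Eigen idiom, assuming the unsupported Eigen Tensor module is available:

```
// Minimal demonstration of device-placed Eigen tensor assignment. With a
// GpuDevice (and EIGEN_USE_GPU) the identical expression would run on the GPU.
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 1> t(8);
  Eigen::DefaultDevice cpu;
  t.device(cpu) = t.constant(0.0f);  // the assignment is evaluated on `cpu`
  return t(0) == 0.0f ? 0 : 1;
}
```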
diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 8a4981c7be7587a0cc5f72cabe71e05702112ac3..8ab4e82ac4b795126af7707ce19c6c00da48ee56 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -20,10 +20,10 @@ namespace operators { class MeanOp : public OperatorWithKernel { protected: void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 1, "Input size of AddOp must be one"); - PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of AddOp must be one"); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.OutputVar(0) != nullptr, - "Input/Output of MeanOp must be initialized."); + PADDLE_ENFORCE_EQ(ctx.InputSize(), 1, "Input size of MeanOp must be one"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of MeanOp must be one"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "input should be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), "output should be set"); ctx.Output<Tensor>(0)->Resize(framework::make_ddim({1})); } }; diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index 6e7af7f02ae23ec65459dfd15d950a43e96fec4d..bb2d02b56f48ac4b2f3b1ca742ae6d6141d3454e 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -70,15 +70,15 @@ class NetOp : public framework::OperatorBase { */ void AddOp(const std::shared_ptr<OperatorBase>& op) { PADDLE_ENFORCE(!add_op_done_, "Cannot AddOp when this network is sealed"); - PADDLE_ENFORCE(op != nullptr, "Cannot Insert Null op"); + PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); ops_.push_back(op); } void InsertOp(size_t pos, const std::shared_ptr<OperatorBase>& op) { PADDLE_ENFORCE(!add_op_done_, "Cannot InsertOp when this network is sealed"); - PADDLE_ENFORCE(op != nullptr, "Cannot Insert Null op"); - PADDLE_ENFORCE(pos <= ops_.size(), "Out of range"); + PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); + PADDLE_ENFORCE_LE(pos, ops_.size(), "Out of range"); ops_.insert(ops_.begin() + pos, op); } diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 6307583f4ee3f185845690d0e378945d066eae75..e0532f2f090aecead499ccef8afb117876be5c78 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -20,11 +20,11 @@ namespace operators { class SGDOp : public OperatorWithKernel { protected: void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 2, "Input size of SGDOp must be two"); - PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of SGDOp must be one"); - PADDLE_ENFORCE(ctx.InputVar(0) != nullptr, "inputs[0] mast be set"); - PADDLE_ENFORCE(ctx.InputVar(1) != nullptr, "inputs[1] mast be set"); - PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, "outputs[0] mast be set"); + PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, "Input size of SGDOp must be two"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of SGDOp must be one"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "inputs[0] must be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(1), "inputs[1] must be set"); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), "outputs[0] must be set"); PADDLE_ENFORCE(ctx.Input<Tensor>(0)->dims() == ctx.Input<Tensor>(1)->dims(), "Two input of SGD Op's dimension must be same."); ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims()); diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index a070458f5e55cf47253ab0df5af7a1163b4f8092..c08e1b153c05baa474bcd344c1e87405193cb688 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -20,12 +20,12 @@ namespace operators { class SoftmaxOp : public OperatorWithKernel { protected: void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 1UL, - "Only one input is need for softmax"); - PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL, - "The input of softmax op must be matrix"); - PADDLE_ENFORCE(ctx.OutputSize() == 1UL, - "Only one output is need for softmax"); + PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL, + "Only one input is needed for softmax"); + PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims().size(), 2UL, + "The input of softmax op must be matrix"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL, + "Only one output is needed for softmax"); ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims()); } }; @@ -43,13 +43,13 @@ class SoftmaxOpMaker : public OpProtoAndCheckerMaker { class SoftmaxOpGrad : public OperatorWithKernel { protected: void InferShape(const InferShapeContext &ctx) const override { - PADDLE_ENFORCE(ctx.InputSize() == 3UL, - "Input of SoftmaxOpGrad should be 3, X, Y, YG"); - PADDLE_ENFORCE(ctx.OutputSize() == 1UL, - "Output of SoftmaxOpGrad should be 1"); - PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null"); - PADDLE_ENFORCE(ctx.InputVar(framework::GradVarName("Y")) != nullptr, - "Input(Y@GRAD) should not be null"); + PADDLE_ENFORCE_EQ(ctx.InputSize(), 3UL, + "Input of SoftmaxOpGrad should be 3, X, Y, YG"); + PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL, + "Output of SoftmaxOpGrad should be 1"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")), + "Input(Y@GRAD) should not be null"); PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() == ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(), "the shape of Input(0) and Input(1) should be the same"); diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index bc0715656a7d61774d53d4a0643ec1c105706085..d2adb997de8e36922d5056b20f238a82eee74f8c 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -187,25 +187,16 @@ inline void throw_on_error(T e) { __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__) #define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \ __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__) - -// if two values have different data types, choose a compatible type for them. -template <typename T1, typename T2> -struct CompatibleType { - static const bool t1_to_t2 = std::is_convertible<T1, T2>::value; - typedef typename std::conditional<t1_to_t2, T2, T1>::type type; -}; +#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...) \ + PADDLE_ENFORCE(nullptr != (__VAL), #__VAL " should not be null\n%s", \ + paddle::string::Sprintf("" __VA_ARGS__)); #define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...) \ - PADDLE_ENFORCE(__COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL0) \ - __CMP __COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL1), \ + PADDLE_ENFORCE(__VAL0 __CMP __VAL1, \ "enforce %s " #__CMP " %s failed, %s " #__INV_CMP " %s\n%s", \ #__VAL0, #__VAL1, std::to_string(__VAL0), \ std::to_string(__VAL1), \ paddle::string::Sprintf("" __VA_ARGS__)); -#define __COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL) \ - typename paddle::platform::CompatibleType<decltype(__VAL0), decltype(__VAL1)>::type(__VAL) - } // namespace platform } // namespace paddle
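Two details of the new `enforce.h` are easy to miss. First, dropping `CompatibleType` means the binary comparison now relies on the built-in operator (with its usual promotions) instead of casting both sides to a common type. Second, the `Sprintf("" __VA_ARGS__)` expansion is what makes the trailing message optional: the empty literal either concatenates with a user-supplied format string or stands alone as an empty format. A standalone mock of that trick (not Paddle's `string::Sprintf`):

```
// Demonstrates the "" __VA_ARGS__ trick: zero extra arguments yields
// snprintf(buf, n, ""), one or more yields snprintf(buf, n, "" "fmt", ...)
// thanks to adjacent string-literal concatenation.
#include <cstdio>
#include <string>

template <typename... Args>
std::string mock_sprintf(const char* fmt, Args... args) {
  char buf[128];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return buf;
}

#define MOCK_MESSAGE(...) mock_sprintf("" __VA_ARGS__)

int main() {
  std::string none = MOCK_MESSAGE();                 // ""
  std::string some = MOCK_MESSAGE("at step %d", 7);  // "at step 7"
  return (none.empty() && some == "at step 7") ? 0 : 1;
}
```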
diff --git a/paddle/platform/enforce_test.cc b/paddle/platform/enforce_test.cc index 7117b49474044af08ae9db79c2fae6693e966af2..4dfb69754608cb1120baa295072c3d031a4e1a7b 100644 --- a/paddle/platform/enforce_test.cc +++ b/paddle/platform/enforce_test.cc @@ -9,8 +9,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/enforce.h" +#include <memory> + #include "gtest/gtest.h" +#include "paddle/platform/enforce.h" TEST(ENFORCE, OK) { PADDLE_ENFORCE(true, "Enforce is ok %d now %f", 123, 0.345); @@ -196,3 +198,27 @@ TEST(ENFORCE_LT, FAIL) { ASSERT_TRUE(in_catch); } + +TEST(ENFORCE_NOT_NULL, OK) { + int* a = new int; + PADDLE_ENFORCE_NOT_NULL(a); + delete a; +} +TEST(ENFORCE_NOT_NULL, FAIL) { + bool in_catch = false; + int* a{nullptr}; + + try { + PADDLE_ENFORCE_NOT_NULL(a); + + } catch (paddle::platform::EnforceNotMet error) { + in_catch = true; + const std::string msg = "a should not be null"; + const char* what = error.what(); + for (size_t i = 0; i < msg.length(); ++i) { + ASSERT_EQ(what[i], msg[i]); + } + } + + ASSERT_TRUE(in_catch); +} diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index 29dd0ded0ac75893da7e244d92725cd5e285efce..8e6b258e00c0012876cda8ffc5b340322d51e894 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -6,4 +6,5 @@ cc_library(paddle_pybind SHARED add_op mean_op cross_entropy_op - recurrent_op) + recurrent_op + fill_zeros_like_op) diff --git a/paddle/scripts/CMakeLists.txt b/paddle/scripts/CMakeLists.txt index 66a46e1883a49d491f0cb3056a7039407d72e337..a52f06fe497dac467e4ef2543ebda7a423ca326d 100644 --- a/paddle/scripts/CMakeLists.txt +++ b/paddle/scripts/CMakeLists.txt @@ -1,17 +1,15 @@ configure_file(submit_local.sh.in - submit_local.sh + paddle @ONLY) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/submit_local.sh DESTINATION bin +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/paddle DESTINATION bin PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ - GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ - RENAME paddle) + GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ) configure_file(tools/usage_stat/usage.sh - usage.sh + paddle_usage @ONLY) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/usage.sh DESTINATION opt/paddle/bin +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/paddle_usage DESTINATION opt/paddle/bin PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ - GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ - RENAME paddle_usage) + GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ) diff --git a/paddle/scripts/docker/build.sh
b/paddle/scripts/docker/build.sh index ede9e210245df740f13ebb32c98313554f522dd9..44442be4729ff77e8d378c93acebe1486eb75397 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -33,6 +33,9 @@ Configuring cmake in /paddle/build ... -DWITH_AVX=${WITH_AVX:-OFF} -DWITH_GOLANG=${WITH_GOLANG:-OFF} -DWITH_SWIG_PY=ON + -DWITH_C_API=${WITH_C_API:-OFF} + -DWITH_PYTHON=${WITH_PYTHON:-ON} + -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} -DCUDNN_ROOT=/usr/ -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF} -DWITH_TESTING=${WITH_TESTING:-OFF} @@ -49,7 +52,9 @@ cmake .. \ -DWITH_GPU=${WITH_GPU:-OFF} \ -DWITH_AVX=${WITH_AVX:-OFF} \ -DWITH_GOLANG=${WITH_GOLANG:-OFF} \ - -DWITH_SWIG_PY=ON \ + -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \ + -DWITH_C_API=${WITH_C_API:-OFF} \ + -DWITH_PYTHON=${WITH_PYTHON:-ON} \ -DCUDNN_ROOT=/usr/ \ -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF} \ -DWITH_TESTING=${WITH_TESTING:-OFF} \ diff --git a/paddle/scripts/docker/build_android.sh b/paddle/scripts/docker/build_android.sh index 56d290be4ab04a9f6974023159aa8571d27f8dd5..5584e29e2a155a8062f7d4f2016bd389bd9803f3 100644 --- a/paddle/scripts/docker/build_android.sh +++ b/paddle/scripts/docker/build_android.sh @@ -20,4 +20,4 @@ cmake -DCMAKE_SYSTEM_NAME=Android \ -DWITH_SWIG_PY=OFF \ .. make -j `nproc` -make install +make install -j `nproc` diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in old mode 100644 new mode 100755 diff --git a/paddle/setup.py.in b/paddle/setup.py.in deleted file mode 100644 index af107e76723135124e56db52a76e4f8aff5c4acf..0000000000000000000000000000000000000000 --- a/paddle/setup.py.in +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from setuptools import setup, Extension - -setup(name="py_paddle", - version="${PADDLE_VERSION}", - packages=['py_paddle'], - include_package_data=True, - package_data={'py_paddle':['*.py','_swig_paddle.so']}, - install_requires = [ - 'nltk>=3.2.2', - # We use `numpy.flip` in `test_image.py`. - # `numpy.flip` is introduced in `1.12.0` - 'numpy>=1.12.0', # The numpy is required. 
- 'protobuf==${PROTOBUF_VERSION}' # The paddle protobuf version - ], - url='http://www.paddlepaddle.org/', - license='Apache 2.0', -) diff --git a/paddle/trainer/tests/simple_sparse_neural_network.py b/paddle/trainer/tests/simple_sparse_neural_network.py index 9604e1b9b45e571130c2f1bdc6d6a5fbd9c177c4..30346ef299d0bc8585ccff7f2fc4885b0d9f9dfc 100644 --- a/paddle/trainer/tests/simple_sparse_neural_network.py +++ b/paddle/trainer/tests/simple_sparse_neural_network.py @@ -1,6 +1,6 @@ from paddle.trainer_config_helpers import * -settings(batch_size=128, learning_method=AdaGradOptimizer(), learning_rate=1e-4) +settings(batch_size=17, learning_method=AdaGradOptimizer(), learning_rate=1e-4) file_list = 'trainer/tests/fake_file_list.list' @@ -12,7 +12,7 @@ define_py_data_sources2( embedding = embedding_layer( input=data_layer( - name="word_ids", size=65536), + name="word_ids", size=8191), size=128, param_attr=ParamAttr(sparse_update=True)) prediction = fc_layer(input=embedding, size=10, act=SoftmaxActivation()) diff --git a/paddle/trainer/tests/simple_sparse_neural_network_dp.py b/paddle/trainer/tests/simple_sparse_neural_network_dp.py index 8bfd1f37e7114f2dcd0798ff1e8180b111ad988f..86b272edfe1bbb23c45cffe282f6475ceaa0cc41 100644 --- a/paddle/trainer/tests/simple_sparse_neural_network_dp.py +++ b/paddle/trainer/tests/simple_sparse_neural_network_dp.py @@ -7,15 +7,15 @@ def init_hook(settings, is_train, **kwargs): @provider( - input_types={'word_ids': integer_value(65536), + input_types={'word_ids': integer_value(8191), 'label': integer_value(10)}, min_pool_size=0, init_hook=init_hook) def process(settings, filename): if settings.is_train: - data_size = 2**20 - else: data_size = 2**10 + else: + data_size = 2**5 for _ in xrange(data_size): - yield random.randint(0, 65535), random.randint(0, 9) + yield random.randint(0, 8190), random.randint(0, 9) diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp index 4d0174f784a0dc7314977d586c3ad1f0f9c69f6d..00ba61377aeff17d82e03f7560c0d71b3570d14f 100644 --- a/paddle/trainer/tests/test_TrainerOnePass.cpp +++ b/paddle/trainer/tests/test_TrainerOnePass.cpp @@ -100,25 +100,25 @@ TEST(average_window, gpu) { } TEST(average_window, gpu2) { - FLAGS_num_passes = 100; + FLAGS_num_passes = 20; trainerOnePassTest(configFile1, true, false, 2, 0.01); FLAGS_num_passes = 1; } TEST(average_window, gpu4) { - FLAGS_num_passes = 100; + FLAGS_num_passes = 20; trainerOnePassTest(configFile1, true, false, 4, 0.01); FLAGS_num_passes = 1; } TEST(average_window_cpu, gpu2) { - FLAGS_num_passes = 100; + FLAGS_num_passes = 20; trainerOnePassTest(configFile1, true, false, 2, 0.01, true); FLAGS_num_passes = 1; } TEST(average_window_cpu, gpu4) { - FLAGS_num_passes = 100; + FLAGS_num_passes = 20; trainerOnePassTest(configFile1, true, false, 4, 0.01, true); FLAGS_num_passes = 1; } diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 0171f9d8ccd6045cb876d57684269a2a49e77f96..b5030da8e75eb94e857ae4effc6adb6d19dc0e93 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -39,7 +39,7 @@ add_custom_command(OUTPUT ${OUTPUT_DIR}/.timestamp DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER}) add_custom_target(paddle_python ALL DEPENDS - ${OUTPUT_DIR}/.timestamp) + ${OUTPUT_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model python_api_wheel) set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/) diff --git 
a/python/paddle/v2/framework/network.py b/python/paddle/v2/framework/network.py deleted file mode 100644 index cfeb0e3dec0fd2c6ad4d2d2501f97932495fdd41..0000000000000000000000000000000000000000 --- a/python/paddle/v2/framework/network.py +++ /dev/null @@ -1,131 +0,0 @@ -import paddle.v2.framework.core as core -from paddle.v2.framework.create_op_creation_methods import op_creations -from default_scope_funcs import new_var, find_var, get_cur_scope - -__all__ = ['Network'] # Only expose Network - - -class NetworkFunctor(object): - """ - Network Op Creation Function. Used internally in this module. - It convert string input to Variable. If it is not created before, just - create in scope. - - It is a functor object. means the instances are callable. - - :param func: The op creation function which generated in Python. - :param net: The Network instance. - """ - - def __init__(self, func, net): - self.func = func - self.net = net - - def __call__(self, *args, **kwargs): - if len(args) != 0: - raise ValueError("Paddle must use keyword argument") - inputs = self.func.all_input_args - for ipt in inputs: - if ipt in kwargs: - var = kwargs[ipt] - if isinstance(var, basestring): - tmp = new_var(var) - self.net.var_names[tmp] = var - var = tmp - - if not isinstance(var, core.Variable): - raise TypeError( - "Input of op creation must be string or variable") - - kwargs[ipt] = self.net.var_names[var] - - notemp_outputs = self.func.all_not_temp_output_args - - for name in notemp_outputs: - if name not in kwargs: - kwargs[ - name] = self.func.__name__ + "@OUT@%d" % core.unique_integer( - ) - - outputs = self.func.all_output_args - for opt in outputs: - if opt in kwargs: - var = kwargs[opt] - if isinstance(var, basestring): - tmp = new_var(var) - self.net.var_names[tmp] = var - var = tmp - - if not isinstance(var, core.Variable): - raise TypeError( - "Output of op creation must be string or variable") - kwargs[opt] = self.net.var_names[var] - - op = self.func(**kwargs) - - self.net.net.add_op(op) - - lst = [find_var(kwargs[opt]) for opt in notemp_outputs] - if len(lst) == 1: - return lst[0] - elif len(lst) == 0: - return None - else: - return lst - - -class Network(object): - """ - The network concept. It avoid user to manually create operator, create - variable, and combine them into a Net. Just use Network.xxx can create the - operator, create variables in default scope, and add them into `self.net`. - - For example: - - .. code-block: python - - net = Network() - out = net.add_two(X="a", Y="b") - fc_out = net.fc(X="out", W="fc.w") - - net.run(...) - """ - - def __init__(self): - self.net = core.Net.create() - funcs = (func_name for func_name in dir(op_creations) - if not func_name.startswith("__")) - self.var_names = dict() - - # TODO(yuyang18): This code can work, but do not generate a good - # docstring, try to give a better way generate function in runtime - # later. 
- for func_name in funcs: - func = getattr(op_creations, func_name) - impl = NetworkFunctor(func, self) - setattr(self, func_name, impl.__call__) - self.__complete_add_op__ = False - - def infer_shape(self): - self.complete_add_op() - self.net.infer_shape(get_cur_scope()) - - def run(self, device_context): - self.complete_add_op() - self.net.run(get_cur_scope(), device_context) - - def __str__(self): - return str(self.net) - - def complete_add_op(self): - if not self.__complete_add_op__: - self.net.complete_add_op() - self.__complete_add_op__ = True - - -if __name__ == '__main__': - net = Network() - out = net.add_two(X="a", Y="b") - fc_out = net.fc(X=out, W="fc.w", b="fc.b", activation="softmax") - net.complete_add_op() - print net diff --git a/python/paddle/v2/framework/create_op_creation_methods.py b/python/paddle/v2/framework/op.py similarity index 66% rename from python/paddle/v2/framework/create_op_creation_methods.py rename to python/paddle/v2/framework/op.py index 6fd33b366b6d376cc51ba5d663bb04d45ab8c933..7fd8b55a5d167294d3270c79f7b64da03443afd3 100644 --- a/python/paddle/v2/framework/create_op_creation_methods.py +++ b/python/paddle/v2/framework/op.py @@ -2,7 +2,6 @@ import paddle.v2.framework.core as core import paddle.v2.framework.proto.op_proto_pb2 as op_proto_pb2 import paddle.v2.framework.proto.op_desc_pb2 as op_desc_pb2 import paddle.v2.framework.proto.attribute_pb2 as attribute_pb2 -import cStringIO def get_all_op_protos(): @@ -146,64 +145,14 @@ class OpDescCreationMethod(object): return False -def get_docstring_from_op_proto(op_proto): - """ - Generate docstring from a OpProto - :param op_proto: a OpProto instance. - :type op_proto: op_proto_pb2.OpProto - :return: docstring - """ - if not isinstance(op_proto, op_proto_pb2.OpProto): - raise TypeError("Input must be OpProto") - f = cStringIO.StringIO() - f.write(op_proto.comment) - f.write("\n") - - def __append_param__(name, comment, type): - # Maybe replace the following line with template engine is better. - f.write(":param ") - f.write(name) - f.write(": ") - f.write(comment) - f.write("\n") - f.write(":type ") - f.write(name) - f.write(": ") - f.write(type) - f.write("\n") - - for ipt in op_proto.inputs: - __append_param__(ipt.name, ipt.comment, "list | basestr" - if ipt.multiple else "basestr") - - temp_var_prefix = \ - "This is a temporary variable. It does not have to set by user. 
" - for opt in op_proto.outputs: - __append_param__(opt.name, opt.comment if not opt.temporary else - temp_var_prefix + opt.comment, "list | basestr" - if opt.multiple else "basestr") - - for attr in op_proto.attrs: - attr_type = None - if attr.type == attribute_pb2.INT: - attr_type = "int" - elif attr.type == attribute_pb2.FLOAT: - attr_type = "float" - elif attr.type == attribute_pb2.STRING: - attr_type = "basestr" - elif attr.type == attribute_pb2.INTS: - attr_type = "list of int" - elif attr.type == attribute_pb2.FLOATS: - attr_type = "list of float" - elif attr.type == attribute_pb2.STRINGS: - attr_type = "list of basestr" - - if attr_type is None: - raise RuntimeError("Not supported attribute type " + attr.type) - - __append_param__(attr.name, attr.comment, attr_type) - - return f.getvalue() +class OpInfo(object): + def __init__(self, name, method, inputs, outputs, attrs, no_temp_outputs): + self.name = name + self.method = method + self.inputs = inputs + self.outputs = outputs + self.attrs = attrs + self.no_temp_outputs = no_temp_outputs def create_op_creation_method(op_proto): @@ -216,38 +165,57 @@ def create_op_creation_method(op_proto): opdesc = method(*args, **kwargs) return core.Operator.create(opdesc.SerializeToString()) - __impl__.__doc__ = get_docstring_from_op_proto(op_proto) - __impl__.all_input_args = [var.name for var in op_proto.inputs] - __impl__.all_output_args = [var.name for var in op_proto.outputs] - __impl__.all_attr_args = [attr.name for attr in op_proto.attrs] - __impl__.all_not_temp_output_args = [ - var.name for var in op_proto.outputs if not var.temporary - ] + return OpInfo( + method=__impl__, + name=op_proto.type, + inputs=[var.name for var in op_proto.inputs], + outputs=[var.name for var in op_proto.outputs], + attrs=[attr.name for attr in op_proto.attrs], + no_temp_outputs=[ + var.name for var in op_proto.outputs if not var.temporary + ]) - return __impl__ +class OperatorFactory(object): + def __init__(self): + self.op_methods = dict() + for op_proto in get_all_op_protos(): + method = create_op_creation_method(op_proto) + self.op_methods[method.name] = method -class OpCreationsHolder(object): - """ - A object will holds all op creation methods. - - Use `op_creations.xxx_op` to access them. - """ - pass + def __call__(self, *args, **kwargs): + if 'type' in kwargs: + if len(args) != 0: + raise ValueError("All Paddle argument should be key-word " + "argument except type") + t = kwargs.pop('type') + else: + if len(args) != 1: + raise ValueError("All Paddle argument should be key-word " + "argument except type") + t = args[0] + return self.get_op_info(t).method(**kwargs) -op_creations = OpCreationsHolder() + def types(self): + return self.op_methods.keys() + def get_op_info(self, t): + if t not in self.op_methods: + raise ValueError("operator %s is not registered", t) + return self.op_methods.get(t) -def __bootstrap__(): - """ - Bootstrap function for this module. It will dynamic create all op creation - methods in runtime. 
- """ - for op_proto in get_all_op_protos(): - func = create_op_creation_method(op_proto) - func.__name__ = str(op_proto.type) - setattr(op_creations, func.__name__, func) + def get_op_input_names(self, type): + return self.get_op_info(type).inputs + + def get_op_output_names(self, type): + return self.get_op_info(type).outputs + + def get_op_attr_names(self, type): + return self.get_op_info(type).attrs + + def get_op_no_temp_output_names(self, type): + return self.get_op_info(type).no_temp_outputs -__bootstrap__() +Operator = OperatorFactory() # Default global factory diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 7eec37678815587b451008eef587b23bcb9beeaf..541639ac21661529b0b1f2cc8d8fa25605052c8c 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -6,7 +6,6 @@ py_test(test_scope SRCS test_scope.py) py_test(test_tensor SRCS test_tensor.py) py_test(test_mul_op SRCS test_mul_op.py) -py_test(test_network SRCS test_network.py) py_test(test_mean_op SRCS test_mean_op.py) py_test(test_protobuf SRCS test_protobuf.py) @@ -14,10 +13,11 @@ py_test(test_protobuf SRCS test_protobuf.py) py_test(test_add_two_op SRCS test_add_two_op.py) py_test(test_sigmoid_op SRCS test_sigmoid_op.py) py_test(test_softmax_op SRCS test_softmax_op.py) +py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py) py_test(gradient_checker SRCS gradient_checker.py) py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py) py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py) -py_test(test_op_creation_methods SRCS test_op_creation_methods.py) +py_test(test_operator SRCS test_operator.py) diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index 4022de1c40e41aa77a7f31d82b55b63585cbd5f5..cfd29932f5b46920815819c5a75d62a0138e21a2 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -1,5 +1,5 @@ import paddle.v2.framework.core as core -from paddle.v2.framework.create_op_creation_methods import op_creations +from paddle.v2.framework.op import Operator import numpy import unittest @@ -80,7 +80,7 @@ if __name__ == '__main__': class GetNumericGradientTest(unittest.TestCase): def test_add_op(self): - add_op = op_creations.add_two(X="X", Y="Y", Out="Z") + add_op = Operator('add_two', X="X", Y="Y", Out="Z") x = numpy.random.random((10, 1)).astype("float32") y = numpy.random.random((10, 1)).astype("float32") diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py index e6bc7d8a9b5ddd4582a5ef8a47cb63a7e5911892..da6bed0fcd690d5a7f53f44d0181c75f12e5d074 100644 --- a/python/paddle/v2/framework/tests/op_test_util.py +++ b/python/paddle/v2/framework/tests/op_test_util.py @@ -1,7 +1,7 @@ import paddle.v2.framework.core as core import unittest import numpy -import paddle.v2.framework.create_op_creation_methods as creation +from paddle.v2.framework.op import Operator class OpTestMeta(type): @@ -21,18 +21,14 @@ class OpTestMeta(type): obj = super(OpTestMeta, cls).__new__(cls, name, bases, attrs) def test_all(self): - func = getattr(creation.op_creations, self.type, None) - self.assertIsNotNone(func) - scope = core.Scope() kwargs = dict() - places = [] - places.append(core.CPUPlace()) - if core.is_compile_gpu(): + places = [core.CPUPlace()] + if core.is_compile_gpu() and 
core.Operator.support_gpu(self.type): places.append(core.GPUPlace(0)) for place in places: - for in_name in func.all_input_args: + for in_name in Operator.get_op_input_names(self.type): if hasattr(self, "inputs") and in_name in self.inputs: kwargs[in_name] = in_name var = scope.new_var(in_name).get_tensor() @@ -42,7 +38,7 @@ class OpTestMeta(type): else: kwargs[in_name] = "@EMPTY@" - for out_name in func.all_output_args: + for out_name in Operator.get_op_output_names(self.type): if not hasattr(self, "outputs"): raise ValueError( "The test op must set self.outputs dict.") @@ -52,21 +48,23 @@ class OpTestMeta(type): kwargs[out_name] = out_name scope.new_var(out_name).get_tensor() - for attr_name in func.all_attr_args: + for attr_name in Operator.get_op_attr_names(self.type): if hasattr(self, "attrs") and attr_name in self.attrs: kwargs[attr_name] = self.attrs[attr_name] - op = func(**kwargs) + op = Operator(self.type, **kwargs) op.infer_shape(scope) ctx = core.DeviceContext.create(place) op.run(scope, ctx) - for out_name in func.all_output_args: + for out_name in Operator.get_op_output_names(self.type): actual = numpy.array(scope.find_var(out_name).get_tensor()) expect = self.outputs[out_name] - numpy.isclose(actual, expect) + self.assertTrue( + numpy.allclose(actual, expect), + "output name: " + out_name + " has diff") obj.test_all = test_all return obj diff --git a/python/paddle/v2/framework/tests/test_add_two_op.py b/python/paddle/v2/framework/tests/test_add_two_op.py index 8ef48f4727b0af46a696c6f463045d98e7a08800..c0237830647371e14b755953345965a3eac7bfd2 100644 --- a/python/paddle/v2/framework/tests/test_add_two_op.py +++ b/python/paddle/v2/framework/tests/test_add_two_op.py @@ -2,7 +2,7 @@ import unittest import numpy import paddle.v2.framework.core as core -import paddle.v2.framework.create_op_creation_methods as creation +from paddle.v2.framework.op import Operator from op_test_util import OpTestMeta @@ -21,7 +21,7 @@ class TestAddOp(unittest.TestCase): class TestAddGradOp(unittest.TestCase): def test_add_grad(self): - op = creation.op_creations.add_two(X="X", Y="Y", Out="Out") + op = Operator('add_two', X="X", Y="Y", Out="Out") backward_op = core.Operator.backward(op, set()) self.assertEqual(backward_op.type(), "add_two_grad") expected = '''Op(add_two_grad), inputs:(X, Y, Out, Out@GRAD), outputs:(X@GRAD, Y@GRAD).''' diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 00dc4399aaf59e6382692c3a4356f89a7e79a0c5..e24435839d305bb1a4ab7daa3e9684a421468fd8 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -1,7 +1,7 @@ import paddle.v2.framework.core as core import unittest import numpy -import paddle.v2.framework.create_op_creation_methods as creation +from paddle.v2.framework.op import Operator class TestFc(unittest.TestCase): @@ -24,7 +24,7 @@ class TestFc(unittest.TestCase): # Set a real numpy array here.
# x_tensor.set(numpy.array([])) - op = creation.op_creations.fc(X="X", Y="Y", W="W") + op = Operator("fc", X="X", Y="Y", W="W") for out in op.outputs(): if scope.find_var(out) is None: diff --git a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py new file mode 100644 index 0000000000000000000000000000000000000000..e5c862605fb11a5ea1426cf8f9054589dc377ff1 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py @@ -0,0 +1,16 @@ +import unittest +from op_test_util import OpTestMeta +import numpy + + +class TestFillZerosLikeOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "fill_zeros_like" + self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")} + self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])} + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/framework/tests/test_net.py index db776d6b643dc4014da9f5dded8219180af639e3..b30896553dea4a4929038d524b23c6090bbed380 100644 --- a/python/paddle/v2/framework/tests/test_net.py +++ b/python/paddle/v2/framework/tests/test_net.py @@ -1,16 +1,16 @@ import paddle.v2.framework.core as core -from paddle.v2.framework.create_op_creation_methods import op_creations +from paddle.v2.framework.op import Operator import unittest class TestNet(unittest.TestCase): def test_net_all(self): net = core.Net.create() - op1 = op_creations.add_two(X="X", Y="Y", Out="Out") + op1 = Operator("add_two", X="X", Y="Y", Out="Out") net.add_op(op1) net2 = core.Net.create() - net2.add_op(op_creations.fc(X="X", W="w", Y="fc.out")) + net2.add_op(Operator("fc", X="X", W="w", Y="fc.out")) net2.complete_add_op(True) net.add_op(net2) net.complete_add_op(True) diff --git a/python/paddle/v2/framework/tests/test_network.py b/python/paddle/v2/framework/tests/test_network.py deleted file mode 100644 index 6d53e233e959bd39b558ac97cdca381135505f8d..0000000000000000000000000000000000000000 --- a/python/paddle/v2/framework/tests/test_network.py +++ /dev/null @@ -1,32 +0,0 @@ -from paddle.v2.framework.network import Network -import paddle.v2.framework.core as core -import unittest - - -class TestNet(unittest.TestCase): - def test_net_all(self): - net = Network() - out = net.add_two(X="X", Y="Y") - fc_out = net.fc(X=out, W="w") - net.complete_add_op() - self.assertTrue(isinstance(fc_out, core.Variable)) - self.assertEqual( - '''Op(plain_net), inputs:(@EMPTY@, X, Y, w), outputs:(@TEMP@fc@0, add_two@OUT@0, fc@OUT@1). - Op(add_two), inputs:(X, Y), outputs:(add_two@OUT@0). - Op(fc), inputs:(add_two@OUT@0, w, @EMPTY@), outputs:(fc@OUT@1, @TEMP@fc@0). - Op(mul), inputs:(add_two@OUT@0, w), outputs:(@TEMP@fc@0). - Op(sigmoid), inputs:(@TEMP@fc@0), outputs:(fc@OUT@1). -''', str(net)) - - net2 = Network() - tmp = net2.add_two(X="X", Y="Y") - self.assertTrue(isinstance(tmp, core.Variable)) - net2.complete_add_op() - self.assertEqual( - '''Op(plain_net), inputs:(X, Y), outputs:(add_two@OUT@2). - Op(add_two), inputs:(X, Y), outputs:(add_two@OUT@2). 
-''', str(net2)) - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/v2/framework/tests/test_op_creation_methods.py b/python/paddle/v2/framework/tests/test_operator.py similarity index 64% rename from python/paddle/v2/framework/tests/test_op_creation_methods.py rename to python/paddle/v2/framework/tests/test_operator.py index 1d2ce6d0717bfb45355fe0cabc516a598492d518..4f164e1a69e3fd0409f9b575a8bd9b4e423b486b 100644 --- a/python/paddle/v2/framework/tests/test_op_creation_methods.py +++ b/python/paddle/v2/framework/tests/test_operator.py @@ -1,5 +1,5 @@ import unittest -import paddle.v2.framework.create_op_creation_methods as creation +import paddle.v2.framework.op as op import paddle.v2.framework.core as core import paddle.v2.framework.proto.op_proto_pb2 as op_proto_pb2 import paddle.v2.framework.proto.op_desc_pb2 as op_desc_pb2 @@ -8,7 +8,7 @@ import paddle.v2.framework.proto.attribute_pb2 as attribute_pb2 class TestGetAllProtos(unittest.TestCase): def test_all(self): - all_protos = creation.get_all_op_protos() + all_protos = op.get_all_op_protos() self.assertNotEqual(0, len(all_protos)) for each in all_protos: @@ -17,25 +17,25 @@ class TestGetAllProtos(unittest.TestCase): class TestOpDescCreationMethod(unittest.TestCase): def test_plain_input_output(self): - op = op_proto_pb2.OpProto() - op.type = "test" - ipt = op.inputs.add() + op_proto = op_proto_pb2.OpProto() + op_proto.type = "test" + ipt = op_proto.inputs.add() ipt.name = "X" ipt.comment = "not matter" - ipt = op.inputs.add() + ipt = op_proto.inputs.add() ipt.name = "Y" ipt.comment = "not matter" - opt = op.outputs.add() + opt = op_proto.outputs.add() opt.name = "Z" opt.comment = "not matter" - op.comment = "not matter" + op_proto.comment = "not matter" - self.assertTrue(op.IsInitialized()) + self.assertTrue(op_proto.IsInitialized()) - method = creation.OpDescCreationMethod(op) + method = op.OpDescCreationMethod(op_proto) output = method(X="a", Y="b", Z="c") expected = op_desc_pb2.OpDesc() @@ -45,29 +45,29 @@ class TestOpDescCreationMethod(unittest.TestCase): self.assertEqual(expected, output) def test_multiple_input_plain_output(self): - op = op_proto_pb2.OpProto() - op.type = "fc" - ipt = op.inputs.add() + op_proto = op_proto_pb2.OpProto() + op_proto.type = "fc" + ipt = op_proto.inputs.add() ipt.name = "X" ipt.comment = "" ipt.multiple = True - ipt = op.inputs.add() + ipt = op_proto.inputs.add() ipt.name = "W" ipt.comment = "" ipt.multiple = True - ipt = op.inputs.add() + ipt = op_proto.inputs.add() ipt.name = "b" ipt.comment = "" - out = op.outputs.add() + out = op_proto.outputs.add() out.name = "Y" out.comment = "" - op.comment = "" - self.assertTrue(op.IsInitialized()) - method = creation.OpDescCreationMethod(op) + op_proto.comment = "" + self.assertTrue(op_proto.IsInitialized()) + method = op.OpDescCreationMethod(op_proto) generated1 = method(X="x", W="w", b="b", Y="y") expected1 = op_desc_pb2.OpDesc() @@ -93,14 +93,14 @@ class TestOpDescCreationMethod(unittest.TestCase): self.assertEqual(expected2, generated2) def test_attrs(self): - op = op_proto_pb2.OpProto() - op.type = "test" - ipt = op.inputs.add() + op_proto = op_proto_pb2.OpProto() + op_proto.type = "test" + ipt = op_proto.inputs.add() ipt.name = 'X' ipt.comment = "" def __add_attr__(name, type): - attr = op.attrs.add() + attr = op_proto.attrs.add() attr.name = name attr.comment = "" attr.type = type @@ -112,10 +112,10 @@ class TestOpDescCreationMethod(unittest.TestCase): __add_attr__("floats_attr", attribute_pb2.FLOATS) 
__add_attr__("strings_attr", attribute_pb2.STRINGS) - op.comment = "" - self.assertTrue(op.IsInitialized()) + op_proto.comment = "" + self.assertTrue(op_proto.IsInitialized()) - method = creation.OpDescCreationMethod(op) + method = op.OpDescCreationMethod(op_proto) generated = method( X="a", @@ -162,23 +162,23 @@ class TestOpDescCreationMethod(unittest.TestCase): self.assertEqual(expected, generated) def test_input_temporary_output(self): - op = op_proto_pb2.OpProto() - op.type = "test" - out = op.outputs.add() + op_proto = op_proto_pb2.OpProto() + op_proto.type = "test" + out = op_proto.outputs.add() out.name = "OUT" out.comment = "" - out = op.outputs.add() + out = op_proto.outputs.add() out.name = "TMP" out.comment = "" out.temporary = True - out = op.outputs.add() + out = op_proto.outputs.add() out.name = "OUT2" out.comment = "" - op.comment = "" + op_proto.comment = "" - method = creation.OpDescCreationMethod(op) + method = op.OpDescCreationMethod(op_proto) generated = method(OUT="a", OUT2="b") desc = op_desc_pb2.OpDesc() desc.outputs.extend(["a", core.var_names.temp(), "b"]) @@ -190,60 +190,9 @@ class TestOpDescCreationMethod(unittest.TestCase): self.assertEqual(generated, desc) -class TestOpCreationDocStr(unittest.TestCase): - def test_all(self): - op = op_proto_pb2.OpProto() - op.type = "test" - op.comment = """Test Op. - -This op is used for unit test, not a real op. -""" - a = op.inputs.add() - a.name = "a" - a.comment = "Input a for test op" - a.multiple = True - - b = op.inputs.add() - b.name = "b" - b.comment = "Input b for test op" - self.assertTrue(op.IsInitialized()) - - o1 = op.outputs.add() - o1.name = "output" - o1.comment = "The output of test op" - - o2 = op.outputs.add() - o2.name = "temp output" - o2.comment = "The temporary output of test op" - o2.temporary = True - - test_str = op.attrs.add() - test_str.name = "str_attr" - test_str.type = attribute_pb2.STRING - test_str.comment = "A string attribute for test op" - - actual = creation.get_docstring_from_op_proto(op) - expected_docstring = '''Test Op. - -This op is used for unit test, not a real op. - -:param a: Input a for test op -:type a: list | basestr -:param b: Input b for test op -:type b: basestr -:param output: The output of test op -:type output: basestr -:param temp output: This is a temporary variable. It does not have to set by user. 
The temporary output of test op -:type temp output: basestr -:param str_attr: A string attribute for test op -:type str_attr: basestr -''' - self.assertEqual(expected_docstring, actual) - - class TestOpCreations(unittest.TestCase): def test_all(self): - add_op = creation.op_creations.add_two(X="a", Y="b", Out="z") + add_op = op.Operator("add_two", X="a", Y="b", Out="z") self.assertIsNotNone(add_op) # Invoke C++ DebugString() self.assertEqual('Op(add_two), inputs:(a, b), outputs:(z).', diff --git a/python/paddle/v2/framework/tests/test_softmax_op.py b/python/paddle/v2/framework/tests/test_softmax_op.py index 98ca8ddc860c3825411b02b2f6ed612db46a18d7..d20e085b8e43488480edf07b6cd4edcd861883f3 100644 --- a/python/paddle/v2/framework/tests/test_softmax_op.py +++ b/python/paddle/v2/framework/tests/test_softmax_op.py @@ -2,7 +2,7 @@ import unittest import numpy as np import paddle.v2.framework.core as core -import paddle.v2.framework.create_op_creation_methods as creation +from paddle.v2.framework.op import Operator from op_test_util import OpTestMeta @@ -27,7 +27,7 @@ class TestSoftmaxOp(unittest.TestCase): class TestSoftmaxGradOp(unittest.TestCase): def test_softmax_grad(self): - op = creation.op_creations.softmax(X="X", Y="Y") + op = Operator('softmax', X="X", Y="Y") backward_op = core.Operator.backward(op, set()) self.assertEqual(backward_op.type(), "softmax_grad") expected = '''Op(softmax_grad), inputs:(X, Y, Y@GRAD), outputs:(X@GRAD).''' diff --git a/python/setup.py.in b/python/setup.py.in index 7808238aa6ba5ca5c13292638f1c513f87cc2af2..38f0a503bee3eb29ae3c893c96d6e333be54b96e 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -1,4 +1,8 @@ -from setuptools import setup +from setuptools import setup, Distribution + +class BinaryDistribution(Distribution): + def has_ext_modules(foo): + return True packages=['paddle', 'paddle.proto', @@ -11,7 +15,8 @@ packages=['paddle', 'paddle.v2.master', 'paddle.v2.plot', 'paddle.v2.framework', - 'paddle.v2.framework.proto'] + 'paddle.v2.framework.proto', + 'py_paddle'] setup_requires=["requests", "numpy>=1.12", @@ -21,23 +26,33 @@ setup_requires=["requests", "rarfile", "scipy>=0.19.0", "Pillow", - "nltk"] + "nltk>=3.2.2"] if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: setup_requires+=["opencv-python"] -setup(name='paddle', +setup(name='paddlepaddle', version='${PADDLE_VERSION}', description='Parallel Distributed Deep Learning', install_requires=setup_requires, packages=packages, - package_data={'paddle.v2.master': ['libpaddle_master.so'], - 'paddle.v2.framework': ['core.so'] + package_data={ + 'paddle.v2.master': ['libpaddle_master.so'], + 'paddle.v2.framework': ['core.so'], + 'py_paddle':['*.py','_swig_paddle.so'] }, package_dir={ '': '${CMAKE_CURRENT_SOURCE_DIR}', # The paddle.v2.framework.proto will be generated while compiling. # So that package points to other directory. - 'paddle.v2.framework.proto': '${PROJ_BINARY_ROOT}/paddle/framework' + 'paddle.v2.framework.proto': '${PROJ_BINARY_ROOT}/paddle/framework', + 'py_paddle': '${PROJ_ROOT}/paddle/py_paddle' }, + scripts=['${PROJ_BINARY_ROOT}/paddle/scripts/paddle'], + distclass=BinaryDistribution, + data_files=[('/usr/local/opt/paddle/bin', + ['${PROJ_BINARY_ROOT}/paddle/scripts/paddle_usage', + '${PROJ_BINARY_ROOT}/paddle/trainer/paddle_trainer', + '${PROJ_BINARY_ROOT}/paddle/trainer/paddle_merge_model', + '${PROJ_BINARY_ROOT}/paddle/pserver/paddle_pserver_main'])] )
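Finally, the `BinaryDistribution` class at the top of the new `python/setup.py.in` is what makes `bdist_wheel` emit a platform-tagged wheel even though no `Extension` is declared: overriding `has_ext_modules()` to return true marks the distribution as non-pure, which is required because the package ships prebuilt shared objects (`core.so`, `_swig_paddle.so`). A self-contained sketch with a hypothetical package name:

```
# Hypothetical stand-in for python/setup.py.in; the only non-boilerplate part
# is the Distribution subclass that forces a non-pure (platform) wheel.
from setuptools import setup, Distribution

class BinaryDistribution(Distribution):
    def has_ext_modules(self):  # the real setup.py.in names this parameter `foo`
        return True

setup(
    name="demo_pkg",  # hypothetical; the real file uses name='paddlepaddle'
    version="0.1",
    distclass=BinaryDistribution,
)
```

With this in place, `python setup.py bdist_wheel` produces a platform-specific wheel (e.g. tagged `linux_x86_64`) rather than a pure-Python `none-any` one.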