Commit 133dfc03 authored by wangkuiyi, committed by GitHub

Merge pull request #629 from wangkuiyi/docker_openssh

Re-define the way we build Docker images
Parents: 95ef1af2 8e6965be
 cmake_minimum_required(VERSION 2.8)
 project(paddle CXX C)
-set(PADDLE_MAJOR_VERSION 0)
-set(PADDLE_MINOR_VERSION 9)
-set(PADDLE_PATCH_VERSION 0)
-set(PADDLE_VERSION ${PADDLE_MAJOR_VERSION}.${PADDLE_MINOR_VERSION}.${PADDLE_PATCH_VERSION})
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
 set(PROJ_ROOT ${CMAKE_SOURCE_DIR})
@@ -56,7 +52,7 @@ option(ON_COVERALLS "Generating code coverage data on coveralls or not." OFF)
 option(COVERALLS_UPLOAD "Uploading the generated coveralls json." ON)
 if(NOT CMAKE_BUILD_TYPE)
     set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
         "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel"
         FORCE)
 endif()
@@ -75,31 +71,11 @@ include(check_packages)
 include(swig)
 include(coveralls)
-# add PaddlePaddle version
-if(DEFINED ENV{PADDLE_VERSION})
-    add_definitions(-DPADDLE_VERSION=\"$ENV{PADDLE_VERSION}\")
-else()
-    if(EXISTS ${PROJ_ROOT}/.svn/)
-        find_package(Subversion REQUIRED)
-        if(SUBVERSION_FOUND)
-            Subversion_WC_INFO(${PROJ_ROOT} Project)
-            add_definitions(-DPADDLE_VERSION=${Project_WC_REVISION})
-        endif()
-    elseif(EXISTS ${PROJ_ROOT}/.git/)
-        find_package(Git REQUIRED)
-        execute_process(
-            COMMAND ${GIT_EXECUTABLE} log -1 --format=%H
-            WORKING_DIRECTORY ${PROJ_ROOT}
-            OUTPUT_VARIABLE GIT_SHA1
-            RESULT_VARIABLE GIT_RESULT
-            ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
-        if(NOT ${GIT_RESULT})
-            add_definitions(-DPADDLE_VERSION=\"${GIT_SHA1}\")
-        else()
-            message(WARNING "Cannot add paddle version from git tag")
-        endif()
-    endif()
-endif()
+# Set PaddlePaddle version to Git tag name or Git commit ID.
+find_package(Git REQUIRED)
+# version.cmake will get the current PADDLE_VERSION
+include(version)
+add_definitions(-DPADDLE_VERSION=\"${PADDLE_VERSION}\")
 if(NOT WITH_GPU)
...
# Get the latest git tag.
set(PADDLE_VERSION $ENV{PADDLE_VERSION})
set(tmp_version "HEAD")
while ("${PADDLE_VERSION}" STREQUAL "")
execute_process(
COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 ${tmp_version}
WORKING_DIRECTORY ${PROJ_ROOT}
OUTPUT_VARIABLE GIT_TAG_NAME
RESULT_VARIABLE GIT_RESULT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if (NOT ${GIT_RESULT})
# Check that the tag is a valid version number
if (${GIT_TAG_NAME} MATCHES "v[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME})
else() # otherwise, get the previous git tag name.
set(tmp_version "${GIT_TAG_NAME}~1")
endif()
else()
set(PADDLE_VERSION "0.0.0")
message(WARNING "Cannot add paddle version from git tag")
endif()
endwhile()
message(STATUS "Paddle version is ${PADDLE_VERSION}")
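The while loop above repeatedly asks :code:`git describe` for the nearest tag and steps past tags that do not look like version numbers, until it finds one of the form vX.Y.Z (optionally with an .a/.b/.rc suffix); if the :code:`PADDLE_VERSION` environment variable is already set, the loop never runs. A rough shell re-implementation of the same walk, shown only for illustration, is:

.. code-block:: bash

   # Illustrative re-implementation of the tag walk in version.cmake.
   ref="HEAD"; version=""
   while [ -z "${version}" ]; do
     tag=$(git describe --tags --abbrev=0 "${ref}" 2>/dev/null) || { version="0.0.0"; break; }
     if [[ "${tag}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(\.(a|b|rc)\.[0-9]+)?$ ]]; then
       version="${tag#v}"   # strip the leading "v", as the CMake code does
     else
       ref="${tag}~1"       # otherwise look at tags older than this one
     fi
   done
   echo "Paddle version is ${version}"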
-Docker installation guide
-==========================
+Using and Building Docker Images
+================================

-PaddlePaddle provides a `Docker <https://www.docker.com/>`_ image. `Docker`_ is a lightweight container utility; the performance of PaddlePaddle in a `Docker`_ container is basically the same as running it on a normal Linux host, and `Docker`_ is a very convenient way to deliver binary releases of Linux programs.
-
-.. note::
-
-   The `Docker`_ image is the recommended way to run PaddlePaddle.
-
-PaddlePaddle Docker images
---------------------------
-
-There are 12 `images <https://hub.docker.com/r/paddledev/paddle/tags/>`_ for PaddlePaddle. The repository name is :code:`paddle-dev/paddle`, and the tags are:
-
-+-----------------+------------------+------------------------+-----------------------+
-|                 | normal           | devel                  | demo                  |
-+=================+==================+========================+=======================+
-| CPU             | cpu-latest       | cpu-devel-latest       | cpu-demo-latest       |
-+-----------------+------------------+------------------------+-----------------------+
-| GPU             | gpu-latest       | gpu-devel-latest       | gpu-demo-latest       |
-+-----------------+------------------+------------------------+-----------------------+
-| CPU WITHOUT AVX | cpu-noavx-latest | cpu-devel-noavx-latest | cpu-demo-noavx-latest |
-+-----------------+------------------+------------------------+-----------------------+
-| GPU WITHOUT AVX | gpu-noavx-latest | gpu-devel-noavx-latest | gpu-demo-noavx-latest |
-+-----------------+------------------+------------------------+-----------------------+
-
-The three columns are:
-
-* normal: the image only contains the PaddlePaddle binary.
-* devel: the image contains the PaddlePaddle binary, the source code, and the essential build environment.
-* demo: the image contains the dependencies needed to run the PaddlePaddle demos.
-
-The four rows are:
-
-* CPU: CPU version, for CPUs with :code:`AVX` instructions.
-* GPU: GPU version, for machines with a GPU and a CPU with :code:`AVX` instructions.
-* CPU WITHOUT AVX: CPU version that supports most CPUs, even those without :code:`AVX` instructions.
-* GPU WITHOUT AVX: GPU version that supports most CPUs, even those without :code:`AVX` instructions.
-
-Users can choose a version depending on their machine. The following script detects whether your CPU supports :code:`AVX`:
-
-.. code-block:: bash
-
-   if cat /proc/cpuinfo | grep -q avx ; then echo "Support AVX"; else echo "Not support AVX"; fi
-
-If the output is :code:`Support AVX`, you can choose an AVX version of PaddlePaddle; otherwise, select a :code:`noavx` version. For example, the CPU develop version of PaddlePaddle is :code:`paddle-dev/paddle:cpu-devel-latest`.
-
-The PaddlePaddle images don't contain any entry command. You need to provide your own entry command to use an image; see the :code:`Remote access` part, or just use the following command to run :code:`bash`:
-
-.. code-block:: bash
-
-   docker run -it paddledev/paddle:cpu-latest /bin/bash
-
-Download and Run Docker images
-------------------------------
-
-You have to install Docker on a machine with Linux kernel version 3.10+ first. Please refer to the official guide https://docs.docker.com/engine/installation/ for further information.
-
-You can use :code:`docker pull` to download images first, or just launch a container with :code:`docker run`:
-
-.. code-block:: bash
-
-   docker run -it paddledev/paddle:cpu-latest
-
-If you want to launch a container with GPU support, you need to set some environment variables at the same time:
+We release PaddlePaddle in the form of `Docker <https://www.docker.com/>`_ images on `dockerhub.com <https://hub.docker.com/r/paddledev/paddle/>`_. Running PaddlePaddle as Docker containers is currently the only officially-supported way of running it.
+
+Run Docker images
+-----------------
+
+For each version of PaddlePaddle, we release 4 variants of Docker images:
+
++-----------------+-------------+-------+
+|                 | CPU AVX     | GPU   |
++=================+=============+=======+
+| cpu             | yes         | no    |
++-----------------+-------------+-------+
+| cpu-noavx       | no          | no    |
++-----------------+-------------+-------+
+| gpu             | yes         | yes   |
++-----------------+-------------+-------+
+| gpu-noavx       | no          | yes   |
++-----------------+-------------+-------+
+
+We run the following command on Linux to check whether the CPU supports :code:`AVX`:
+
+.. code-block:: bash
+
+   if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi
+
+On Mac OS X, we need to run
+
+.. code-block:: bash
+
+   sysctl -a | grep machdep.cpu.leaf7_features
+
+Once we determine the proper variant, we compose the full image name by appending the version number as the tag. For example, the following command runs the AVX-enabled image of the most recent version:
+
+.. code-block:: bash
+
+   docker run -it --rm paddledev/paddle:cpu-latest /bin/bash
+
+To run a GPU-enabled image, you need to install CUDA and let Docker know about it:

 .. code-block:: bash

    export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')"
    export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
    docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest
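Putting the variant table and the AVX check above together, one can pick the CPU image variant automatically. The snippet below is only an illustrative sketch, not part of the documentation being changed here; it assumes a Linux host and the :code:`paddledev/paddle` repository used above:

.. code-block:: bash

   # Pick cpu or cpu-noavx depending on whether /proc/cpuinfo reports AVX (sketch).
   if grep -qi avx /proc/cpuinfo; then variant=cpu; else variant=cpu-noavx; fi
   docker run -it --rm paddledev/paddle:${variant}-latest /bin/bash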
-Some notes for docker
----------------------
-
-Performance
-+++++++++++
-
-Since Docker is based on lightweight virtual containers, the CPU computing performance is maintained well. The GPU driver and devices are all mapped into the container, so the GPU computing performance is not seriously affected either.
-
-If you use a high-performance NIC, such as RDMA (RoCE 40GbE or IB 56GbE) or 10GbE Ethernet, it is recommended to use the :code:`--net=host` configuration.
-
-Remote access
-+++++++++++++
-
-If you want to enable ssh access in the background, you need to build an image by yourself. Please refer to the official guide https://docs.docker.com/engine/reference/builder/ for further information.
-
-Following is a simple Dockerfile with ssh:
-
-.. literalinclude:: ../../doc_cn/build_and_install/install/paddle_ssh.Dockerfile
-
-Then you can build an image with the Dockerfile and launch a container:
-
-.. code-block:: bash
-
-   # cd into Dockerfile directory
-   docker build . -t paddle_ssh
-   # run container, and map host machine port 8022 to container port 22
-   docker run -d -p 8022:22 --name paddle_ssh_machine paddle_ssh
-
-Now you can ssh on port 8022 to access the container; the username is root and the password is also root:
-
-.. code-block:: bash
-
-   ssh -p 8022 root@YOUR_HOST_MACHINE
-
-You can stop and delete the container as follows:
-
-.. code-block:: bash
-
-   # stop
-   docker stop paddle_ssh_machine
-   # delete
-   docker rm paddle_ssh_machine
+The default entry point of all our Docker images starts the OpenSSH server. To run PaddlePaddle and expose the OpenSSH port as 2202 on the host computer:
+
+.. code-block:: bash
+
+   docker run -d -p 2202:22 paddledev/paddle:cpu-latest
+
+Then we can log in to the container using username :code:`root` and password :code:`root`:
+
+.. code-block:: bash
+
+   ssh -p 2202 root@localhost
+
+Build Docker images
+-------------------
+
+Developers might want to build Docker images from their local commit or from a tagged version. Suppose that your local repo is at :code:`~/Paddle`; the following steps build a CPU variant from your current work:
+
+.. code-block:: bash
+
+   cd ~/Paddle
+   ./paddle/scripts/docker/generates.sh  # Use m4 to generate Dockerfiles for each variant.
+   docker build -t paddle:latest -f ./paddle/scripts/docker/Dockerfile.cpu .
+
+As a release engineer, you might want to build Docker images for a certain version and publish them to dockerhub.com. You can do this by switching to the right Git tag, or creating a new tag, before running :code:`docker build`. For example, the following commands build Docker images for v0.9.0:
+
+.. code-block:: bash
+
+   cd ~/Paddle
+   git checkout tags/v0.9.0
+   ./paddle/scripts/docker/generates.sh  # Use m4 to generate Dockerfiles for each variant.
+   docker build -t paddle:cpu-v0.9.0 -f ./paddle/scripts/docker/Dockerfile.cpu .
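After building an image as described above, a quick sanity check is to override the image's default command (which starts the OpenSSH server) and print the installed version. This is a minimal sketch assuming the :code:`paddle:latest` tag from the developer example; the :code:`paddle` command is available because the Dockerfile installs the built wheels with pip:

.. code-block:: bash

   docker run --rm paddle:latest paddle version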
The twelve pre-generated, per-variant Dockerfiles shown here all share the same structure; they differ only in the base image named in FROM and in four ENV values. (The old generator script at the end of this page produces exactly these twelve combinations.) The common structure is:

FROM <base image -- see the table below>
MAINTAINER PaddlePaddle Dev Team <paddle-dev@baidu.com>
COPY build.sh /root/
ENV GIT_CHECKOUT=v0.9.0a0
ENV WITH_GPU=<ON|OFF>
ENV IS_DEVEL=<ON|OFF>
ENV WITH_DEMO=<ON|OFF>
ENV PIP_INSTALL_ARGS ""
ENV PIP_GENERAL_ARGS ""
ENV USE_UBUNTU_MIRROR OFF
ENV WITH_AVX=<ON|OFF>
RUN cd /root/ && bash build.sh

In the order the files appear here, the per-file values are:

==  ========================================  ========  ========  =========  ========
#   base image                                WITH_GPU  IS_DEVEL  WITH_DEMO  WITH_AVX
==  ========================================  ========  ========  =========  ========
1   ubuntu:14.04                              OFF       OFF       OFF        ON
2   ubuntu:14.04                              OFF       ON        ON         ON
3   ubuntu:14.04                              OFF       ON        OFF        ON
4   ubuntu:14.04                              OFF       OFF       OFF        OFF
5   ubuntu:14.04                              OFF       ON        ON         OFF
6   ubuntu:14.04                              OFF       ON        OFF        OFF
7   nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04  ON        OFF       OFF        ON
8   nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04  ON        ON        ON         ON
9   nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04  ON        ON        OFF        ON
10  nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04  ON        OFF       OFF        OFF
11  nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04  ON        ON        ON         OFF
12  nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04  ON        ON        OFF        OFF
==  ========================================  ========  ========  =========  ========
 FROM PADDLE_BASE_IMAGE
 MAINTAINER PaddlePaddle Dev Team <paddle-dev@baidu.com>
-COPY build.sh /root/
-ENV GIT_CHECKOUT=v0.9.0
+# It is good to run apt-get install with Dockerfile RUN directive,
+# because if the following invocation to /root/build.sh fails, `docker
+# build` wouldn't have to re-install packages after we fix
+# /root/build.sh. For more about Docker build cache, please refer to
+# https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache.
+RUN apt-get update && \
+    apt-get install -y cmake libprotobuf-dev protobuf-compiler git \
+        libgoogle-glog-dev libgflags-dev libatlas-dev libatlas3-base g++ m4 python-pip \
+        python-protobuf python-numpy python-dev swig openssh-server \
+        wget unzip python-matplotlib tar xz-utils bzip2 gzip coreutils \
+        sed grep graphviz libjpeg-dev zlib1g-dev doxygen && \
+    apt-get clean -y
+RUN pip install BeautifulSoup docopt PyYAML pillow \
+    'sphinx>=1.4.0' sphinx_rtd_theme breathe recommonmark
 ENV WITH_GPU=PADDLE_WITH_GPU
-ENV IS_DEVEL=PADDLE_IS_DEVEL
-ENV WITH_DEMO=PADDLE_WITH_DEMO
-ENV PIP_INSTALL_ARGS ""
-ENV PIP_GENERAL_ARGS ""
-ENV USE_UBUNTU_MIRROR OFF
 ENV WITH_AVX=PADDLE_WITH_AVX
-RUN cd /root/ && bash build.sh
+RUN mkdir /paddle
+COPY . /paddle/
+COPY paddle/scripts/docker/build.sh /root/
+RUN /root/build.sh
+RUN echo 'export LD_LIBRARY_PATH=/usr/lib64:${LD_LIBRARY_PATH}' >> /etc/profile
+RUN pip install /usr/local/opt/paddle/share/wheels/*.whl
+RUN paddle version # print version after build
+# Configure OpenSSH server. c.f. https://docs.docker.com/engine/examples/running_ssh_service
+RUN mkdir /var/run/sshd
+RUN echo 'root:root' | chpasswd
+RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
+RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
+EXPOSE 22
+CMD ["/usr/sbin/sshd", "-D"]
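Dockerfile.m4 is an m4 template: every occurrence of PADDLE_BASE_IMAGE, PADDLE_WITH_GPU, and PADDLE_WITH_AVX is replaced by the value passed with -D, which is exactly what the generator script further below does for each variant. To inspect the expansion for one variant by hand (an illustrative check only):

.. code-block:: bash

   m4 -DPADDLE_WITH_GPU=ON \
      -DPADDLE_WITH_AVX=ON \
      -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
      Dockerfile.m4 | grep -E '^(FROM|ENV)'
   # FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04
   # ENV WITH_GPU=ON
   # ENV WITH_AVX=ON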
@@ -7,43 +7,21 @@ function abort(){
 trap 'abort' 0
 set -e
-if [ ${USE_UBUNTU_MIRROR} == "ON" ]; then
-  sed -i 's#http://archive\.ubuntu\.com/ubuntu/#mirror://mirrors\.ubuntu\.com/mirrors\.txt#g'\
-    /etc/apt/sources.list
-fi
-apt-get update
-apt-get install -y cmake libprotobuf-dev protobuf-compiler git \
-    libgoogle-glog-dev libgflags-dev libatlas-dev libatlas3-base g++ m4 python-pip\
-    python-protobuf python-numpy python-dev swig
 if [ ${WITH_GPU} == 'ON' ]; then
   ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so /usr/lib/libcudnn.so
 fi
-cd ~
-git clone https://github.com/PaddlePaddle/Paddle.git paddle
-cd paddle
-git checkout ${GIT_CHECKOUT}
-mkdir build
-cd build
-cmake .. -DWITH_DOC=OFF -DWITH_GPU=${WITH_GPU} -DWITH_SWIG_PY=ON\
-    -DCUDNN_ROOT=/usr/ -DWITH_STYLE_CHECK=OFF -DWITH_AVX=${WITH_AVX}
+mkdir -p /paddle/build  # -p means no error if exists
+cd /paddle/build
+cmake .. \
+      -DWITH_DOC=ON \
+      -DWITH_GPU=${WITH_GPU} \
+      -DWITH_AVX=${WITH_AVX} \
+      -DWITH_SWIG_PY=ON \
+      -DCUDNN_ROOT=/usr/ \
+      -DWITH_STYLE_CHECK=OFF
 make -j `nproc`
-# because during make install there are several warnings, so set +e, do not cause abort
 make install
-echo 'export LD_LIBRARY_PATH=/usr/lib64:${LD_LIBRARY_PATH}' >> /etc/profile
-pip ${PIP_GENERAL_ARGS} install ${PIP_INSTALL_ARGS} /usr/local/opt/paddle/share/wheels/*.whl
-paddle version # print version after build
-if [ ${WITH_DEMO} == "ON" ]; then
-  apt-get install -y wget unzip perl python-matplotlib tar xz-utils bzip2 gzip coreutils\
-      sed grep graphviz libjpeg-dev zlib1g-dev
-  pip ${PIP_GENERAL_ARGS} install ${PIP_INSTALL_ARGS} BeautifulSoup docopt \
-      PyYAML pillow
-fi
-if [ ${IS_DEVEL} == "OFF" ]; then # clean build packages.
-  cd ~
-  rm -rf paddle
-fi
-apt-get clean -y
 trap : 0
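Note that build.sh no longer clones the repository or chooses the build flavor itself; it reads :code:`WITH_GPU` and :code:`WITH_AVX` from the environment, which the generated Dockerfiles bake in via ENV, and builds the source tree that the Dockerfile copied to /paddle. A minimal sketch of what :code:`docker build` effectively executes for the CPU/AVX variant, assuming the packages installed by Dockerfile.m4 are present:

.. code-block:: bash

   export WITH_GPU=OFF
   export WITH_AVX=ON
   bash /root/build.sh   # runs cmake, make, and make install under /paddle/build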
 #!/bin/bash
 set -e
 cd `dirname $0`
-m4 -DPADDLE_WITH_GPU=OFF -DPADDLE_IS_DEVEL=OFF -DPADDLE_WITH_DEMO=OFF \
-   -DPADDLE_BASE_IMAGE=ubuntu:14.04 -DPADDLE_WITH_AVX=ON \
-   Dockerfile.m4 > Dockerfile.cpu
-m4 -DPADDLE_WITH_GPU=OFF -DPADDLE_IS_DEVEL=OFF -DPADDLE_WITH_DEMO=OFF \
-   -DPADDLE_BASE_IMAGE=ubuntu:14.04 -DPADDLE_WITH_AVX=OFF \
-   Dockerfile.m4 > Dockerfile.cpu-noavx
-m4 -DPADDLE_WITH_GPU=OFF -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=OFF \
-   -DPADDLE_BASE_IMAGE=ubuntu:14.04 -DPADDLE_WITH_AVX=OFF \
-   Dockerfile.m4 > Dockerfile.cpu-noavx-devel
-m4 -DPADDLE_WITH_GPU=OFF -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=OFF \
-   -DPADDLE_BASE_IMAGE=ubuntu:14.04 -DPADDLE_WITH_AVX=ON \
-   Dockerfile.m4 > Dockerfile.cpu-devel
-m4 -DPADDLE_WITH_GPU=OFF -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=ON \
-   -DPADDLE_BASE_IMAGE=ubuntu:14.04 -DPADDLE_WITH_AVX=ON \
-   Dockerfile.m4 > Dockerfile.cpu-demo
-m4 -DPADDLE_WITH_GPU=OFF -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=ON \
-   -DPADDLE_BASE_IMAGE=ubuntu:14.04 -DPADDLE_WITH_AVX=OFF \
-   Dockerfile.m4 > Dockerfile.cpu-noavx-demo
-m4 -DPADDLE_WITH_GPU=ON -DPADDLE_IS_DEVEL=OFF -DPADDLE_WITH_DEMO=OFF \
-   -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
-   -DPADDLE_WITH_AVX=ON \
-   Dockerfile.m4 > Dockerfile.gpu
-m4 -DPADDLE_WITH_GPU=ON -DPADDLE_IS_DEVEL=OFF -DPADDLE_WITH_DEMO=OFF \
-   -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
-   -DPADDLE_WITH_AVX=OFF \
-   Dockerfile.m4 > Dockerfile.gpu-noavx
-m4 -DPADDLE_WITH_GPU=ON -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=OFF \
-   -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
-   -DPADDLE_WITH_AVX=ON \
-   Dockerfile.m4 > Dockerfile.gpu-devel
-m4 -DPADDLE_WITH_GPU=ON -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=OFF \
-   -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
-   -DPADDLE_WITH_AVX=OFF \
-   Dockerfile.m4 > Dockerfile.gpu-noavx-devel
-m4 -DPADDLE_WITH_GPU=ON -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=ON \
-   -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
-   -DPADDLE_WITH_AVX=ON \
-   Dockerfile.m4 > Dockerfile.gpu-demo
-m4 -DPADDLE_WITH_GPU=ON -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=ON \
-   -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
-   -DPADDLE_WITH_AVX=OFF \
-   Dockerfile.m4 > Dockerfile.gpu-noavx-demo
+m4 -DPADDLE_WITH_GPU=OFF \
+   -DPADDLE_WITH_AVX=ON \
+   -DPADDLE_BASE_IMAGE=ubuntu:14.04 \
+   Dockerfile.m4 > Dockerfile.cpu
+m4 -DPADDLE_WITH_GPU=OFF \
+   -DPADDLE_WITH_AVX=OFF \
+   -DPADDLE_BASE_IMAGE=ubuntu:14.04 \
+   Dockerfile.m4 > Dockerfile.cpu-noavx
+m4 -DPADDLE_WITH_GPU=ON \
+   -DPADDLE_WITH_AVX=ON \
+   -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
+   Dockerfile.m4 > Dockerfile.gpu
+m4 -DPADDLE_WITH_GPU=ON \
+   -DPADDLE_WITH_AVX=OFF \
+   -DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
+   Dockerfile.m4 > Dockerfile.gpu-noavx
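After running the generator script above, the four generated Dockerfiles should differ only in their FROM line and in the two ENV settings. A quick, purely illustrative check:

.. code-block:: bash

   cd paddle/scripts/docker
   ls Dockerfile.*
   # Dockerfile.cpu  Dockerfile.cpu-noavx  Dockerfile.gpu  Dockerfile.gpu-noavx  Dockerfile.m4
   grep -E '^(FROM|ENV WITH)' Dockerfile.gpu-noavx
   # FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04
   # ENV WITH_GPU=ON
   # ENV WITH_AVX=OFF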