Project: Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit b67c43ec
Authored Jan 18, 2017 by dangqingqing

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into pad_op

Parents: 8d47499e, 7000cb61

Showing 9 changed files with 271 additions and 81 deletions (+271 −81)
Changed files:

cmake/external/swig.cmake                                   +5    -18
demo/traffic_prediction/predict.sh                          +1    -1
doc/getstarted/build_and_install/build_from_source_en.md    +13   -7
paddle/math/RowBuffer.h                                     +135  -0
paddle/math/SparseRowMatrix.h                               +15   -27
paddle/math/tests/CMakeLists.txt                            +1    -0
paddle/math/tests/test_RowBuffer.cpp                        +65   -0
paddle/scripts/docker/Dockerfile                            +18   -14
paddle/scripts/docker/Dockerfile.gpu                        +18   -14
cmake/external/swig.cmake

@@ -38,14 +38,6 @@ IF(NOT SWIG_FOUND)
         SET(SWIG_DIR ${SWIG_SOURCES_DIR} CACHE FILEPATH "SWIG Directory" FORCE)
         SET(SWIG_EXECUTABLE ${SWIG_SOURCES_DIR}/swig.exe CACHE FILEPATH "SWIG Executable" FORCE)
     ELSE(WIN32)
-        # From PCRE configure
-        ExternalProject_Add(pcre
-            ${EXTERNAL_PROJECT_LOG_ARGS}
-            GIT_REPOSITORY https://github.com/svn2github/pcre.git
-            PREFIX ${SWIG_SOURCES_DIR}/pcre
-            CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SWIG_INSTALL_DIR}/pcre)
-
-        # swig uses bison find it by cmake and pass it down
-        FIND_PACKAGE(BISON)

@@ -54,16 +46,11 @@ IF(NOT SWIG_FOUND)
             GIT_REPOSITORY https://github.com/swig/swig.git
             GIT_TAG rel-3.0.10
             PREFIX ${SWIG_SOURCES_DIR}
-            CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && ./autogen.sh
-            CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig &&
-                env "PCRE_LIBS=${SWIG_INSTALL_DIR}/pcre/lib/libpcre.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcrecpp.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcreposix.a"
-                ./configure --prefix=${SWIG_INSTALL_DIR} --with-pcre-prefix=${SWIG_INSTALL_DIR}/pcre
-            BUILD_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make
-            INSTALL_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make install
-            UPDATE_COMMAND ""
-            DEPENDS pcre
+            CONFIGURE_COMMAND cd <SOURCE_DIR> && ./autogen.sh && ./configure
+                --prefix=${SWIG_INSTALL_DIR} --without-pcre
+            BUILD_COMMAND cd <SOURCE_DIR> && make
+            INSTALL_COMMAND cd <SOURCE_DIR> && make install
+            UPDATE_COMMAND ""
         )

         SET(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION})
demo/traffic_prediction/predict.sh

@@ -25,6 +25,6 @@ paddle train \
               --config_args=is_predict=1 \
               --predict_output_dir=.

-python gen_result.py > result.txt
+python gen_result.py > result.csv

 rm -rf rank-00000
doc/getstarted/build_and_install/build_from_source_en.md

@@ -18,9 +18,10 @@ cd paddle
 To compile the source code, your computer must be equipped with the following dependencies.

-- **Compiler**: GCC >= 4.8 or Clang >= 3.3 (AppleClang >= 5.1)
-- **CMake**: version >= 3.0 (at least CMake 3.4 on Mac OS X)
+- **Compiler**: GCC >= 4.8 or Clang >= 3.3 (AppleClang >= 5.1) and gfortran compiler
+- **CMake**: CMake >= 3.0 (at least CMake 3.4 on Mac OS X)
 - **BLAS**: MKL, OpenBlas or ATLAS
 - **Python**: only support Python 2.7

 **Note:** For CUDA 7.0 and CUDA 7.5, GCC 5.0 and up are not supported!
 For CUDA 8.0, GCC versions later than 5.3 are not supported!

@@ -97,16 +98,21 @@ As a simple example, consider the following:
 ### Install Dependencies

-- **CPU Dependencies**
+- **Paddle Dependencies**

 ```bash
 # necessary
 sudo apt-get update
-sudo apt-get install -y g++ make cmake build-essential python python-pip libpython-dev git
-sudo pip install wheel numpy
-sudo pip install 'protobuf>=3.0.0'
+sudo apt-get install -y git curl gcc g++ gfortran make build-essential automake
+sudo apt-get install -y python python-pip python-numpy libpython-dev bison
+sudo pip install 'protobuf==3.1.0.post1'
+
+# install cmake 3.4
+curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \
+    cd cmake-3.4.1 && ./bootstrap && make -j4 && sudo make install && \
+    cd .. && rm -rf cmake-3.4.1
 ```

 - **GPU Dependencies (optional)**

 To build GPU version, you will need the following installed:
paddle/math/RowBuffer.h (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>
#include "MemoryHandle.h"
#include "paddle/utils/Util.h"

namespace paddle {

/**
 * @brief The RowBuffer class
 * Represent the SparseRow Matrix Data.
 *
 * If not set memory handler, then the data could be auto growth.
 */
class RowBuffer {
public:
  /**
   * @brief RowBuffer create a auto-growth row buffer. The row length is width.
   * @param width the length of each row, a.k.a matrix width.
   */
  explicit RowBuffer(size_t width) : width_(width) {}

  /**
   * @brief RowBuffer create a row buffer, which cannot be auto-growth.
   * @param mem the pre-allocated memory.
   * @param width the length of each row, a.k.a matrix width.
   */
  RowBuffer(const CpuMemHandlePtr& mem, size_t width)
      : preallocatedBuf_(mem), width_(width) {}

  /**
   * @brief resize resize the buffer with rowCount
   * @param rowCnt number of row. matrix height.
   */
  inline void resize(int rowCnt) {
    if (preallocatedBuf_) {
      CHECK(preallocatedBuf_->getSize() >= rowCnt * width_ * sizeof(real));
    } else {
      rowStore_.resize(rowCnt * width_);
    }
  }

  /**
   * @brief get a row buffer with row index.
   * @param row the index of row.
   * @return row buffer.
   */
  inline real* get(int row) const {
    if (preallocatedBuf_) {
      CHECK_LE((row + 1) * width_ * sizeof(real), preallocatedBuf_->getSize());
      return reinterpret_cast<real*>(preallocatedBuf_->getBuf()) + row * width_;
    } else {
      CHECK_LE((row + 1) * width_, rowStore_.size());
      return const_cast<real*>(rowStore_.data() + row * width_);
    }
  }

  /**
   * @brief get a row buffer with row index. If row index is larger than local
   *        buffer, the size of local buffer will grow.
   * @param row the index of row.
   * @return row buffer.
   */
  inline real* getWithAutoGrowth(int row) {
    if (preallocatedBuf_) {
      return get(row);
    } else {
      if ((rowStore_.size() <= row * width_)) {
        rowStore_.resize((row + 1) * width_);
      }
      return rowStore_.data() + row * width_;
    }
  }

  /**
   * @return raw data buffer.
   */
  inline real* data() {
    if (preallocatedBuf_) {
      return reinterpret_cast<real*>(preallocatedBuf_->getBuf());
    } else {
      return rowStore_.data();
    }
  }

  /**
   * @brief clear local buffer. It only affect auto-growth buffer.
   */
  inline void clear() { rowStore_.clear(); }

  /**
   * @brief get current number of rows.
   * @return number of rows.
   */
  inline size_t getRowCount() const {
    if (preallocatedBuf_) {
      return preallocatedBuf_->getSize() / sizeof(real) / width_;
    } else {
      return rowStore_.size() / width_;
    }
  }

  /**
   * @brief get is this buffer can automatically grow or not.
   * @return ture if can automacitally grow.
   */
  inline bool isAutoGrowth() const { return !preallocatedBuf_; }

  /**
   * @brief return the width of matrix. a.k.a length of row.
   * @return width of matrix
   */
  inline size_t getWidth() const { return width_; }

private:
  //! TODO(yuyang18): Add resize method to CpuMemHandlePtr, then we can get rid
  //! of std::vector here.
  CpuMemHandlePtr preallocatedBuf_;
  std::vector<real, AlignedAllocator<real, 32>> rowStore_;
  size_t width_;
};
}  // namespace paddle
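Editorial aside (not part of the commit): the two constructors above give RowBuffer two distinct modes. A minimal sketch, assuming a Paddle source tree on the include path and its `real` typedef:

```cpp
#include <memory>

#include "paddle/math/RowBuffer.h"  // the new header added by this commit

void rowBufferModesSketch() {
  // Auto-growth mode: constructed from a width only; rows are materialized on demand.
  paddle::RowBuffer grown(/*width=*/128);
  grown.getWithAutoGrowth(9)[0] = 1.0f;  // backing vector grows to 10 rows of 128 reals
  // grown.isAutoGrowth() == true, grown.getRowCount() == 10

  // Pre-allocated mode: capacity is fixed by the memory handle.
  auto mem = std::make_shared<paddle::CpuMemoryHandle>(4 * 128 * sizeof(real));
  paddle::RowBuffer fixed(mem, /*width=*/128);
  // fixed.isAutoGrowth() == false, fixed.getRowCount() == 4;
  // requesting row 4 or beyond fails the CHECK in get().
}
```

SparseRowMatrix.h below is refactored around exactly this distinction.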
paddle/math/SparseRowMatrix.h

@@ -18,6 +18,7 @@ limitations under the License. */
 #include <string.h>
 #include <algorithm>
 #include "Matrix.h"
+#include "RowBuffer.h"
 #include "paddle/utils/Util.h"

 DECLARE_bool(allow_inefficient_sparse_update);

@@ -45,12 +46,9 @@ public:
                      IndexDictPtr indexDictHandle = nullptr,
                      bool trans = false)
       : CpuMatrix(nullptr, height, width, trans),
-        storeMat_(dataHandle,
-                  dataHandle ? dataHandle->getSize() / sizeof(real) / width : 0,
-                  width,
-                  trans),
         indexDictHandle_(indexDictHandle) {
     init(height, width);
+    buf_.reset(new RowBuffer(dataHandle, width));
   }

   virtual ~SparseRowCpuMatrix() {}

@@ -71,25 +69,16 @@ public:
    *
    * @param row row id in local storage
    */
-  real* getLocalRow(size_t row) {
-    if (storeMat_.getData()) return storeMat_.rowBuf(row);
-    if (rowStore_.size() <= row * width_) {
-      rowStore_.resize((row + 1) * width_);
-    }
-    return rowStore_.data() + row * width_;
-  }
+  real* getLocalRow(size_t row) { return buf_->getWithAutoGrowth(row); }

   /**
-   * reserve the storage for rows according to current size of indexDictHandle.
+   * reserve the storage for rows according to current size of
+   * indexDictHandle.
    *
    * This is only used when SparseRowCpuMatrix is constructed with
    * indexDictHandle.
    */
-  void reserveStore() {
-    if (!storeMat_.getData() && !localIndices_->empty()) {
-      rowStore_.resize(localIndices_->size() * width_);
-    }
-  }
+  void reserveStore() { buf_->resize(localIndices_->size()); }

   // row is the row id in the original matrix
   virtual real* getRowBuf(size_t row) { return getRow(row); }

@@ -117,7 +106,8 @@ public:
    *
    * If L1 decay set use L1, else if L2 set use L2, otherwise no decay atall.
    *
-   * t0 is a int vector used by L1/L2 decay, size = height of parameter matrix,
+   * t0 is a int vector used by L1/L2 decay, size = height of parameter
+   * matrix,
    * store the time that each weight row last updated.
    *
    * Time is batchId, currentTime is current batchId.

@@ -176,8 +166,7 @@
 protected:
   template <typename Func>
   void apply(Func f) {
-    real* data = storeMat_.getData() ? storeMat_.getData() : rowStore_.data();
-    f(data, localIndices_->size() * width_);
+    f(buf_->data(), localIndices_->size() * width_);
   }

   void init(size_t height, size_t width);

@@ -188,25 +177,24 @@ protected:
       globalIndices_[id] = kUnusedId_;
     }
     localIndices_->clear();
-    rowStore_.clear();
+    buf_->clear();
   }

   inline void checkStoreSize() {
-    if (storeMat_.getData()) {
-      CHECK_LE(localIndices_->size(), storeMat_.getHeight());
-    } else if (!FLAGS_allow_inefficient_sparse_update) {
-      if (localIndices_->size() > 0.5 * height_) {
+    if (buf_->isAutoGrowth()) {
+      if (buf_->getRowCount() > 0.5 * height_) {
         LOG(WARNING) << "There are more than 0.5*height ("
                      << localIndices_->size() << ") rows are used for sparse "
                      << "update, which is not efficient. Considering not use "
                      << "sparse_update or set --allow_inefficient_sparse_update=true";
       }
+    } else {
+      CHECK_LE(localIndices_->size(), buf_->getRowCount());
     }
   }

   CpuMatrix storeMat_;
-  std::vector<real, AlignedAllocator<real, 32>> rowStore_;
+  std::unique_ptr<RowBuffer> buf_;
   IndexDictPtr indexDictHandle_;
   std::vector<unsigned int>* localIndices_;  // =&indexDictHandle_->localIndices
   unsigned int* globalIndices_;  // =indexDictHandle_->globalIndices.data();
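One detail of the constructor change above worth noting (an observation, not text from the commit): `buf_.reset(new RowBuffer(dataHandle, width))` hands the possibly-null `dataHandle` straight to RowBuffer, and `isAutoGrowth()` simply checks whether that handle is set, so the single call covers both the pre-allocated case and the auto-growth case that `storeMat_` and `rowStore_` previously handled in separate branches. A minimal sketch of that dispatch; the helper name is illustrative, not from the source:

```cpp
#include <cstddef>
#include <memory>

#include "paddle/math/RowBuffer.h"

// Hypothetical helper mirroring the SparseRowCpuMatrix constructor change: a null
// CpuMemHandlePtr yields an auto-growth buffer, a non-null one a fixed-capacity
// buffer backed by that memory.
std::unique_ptr<paddle::RowBuffer> makeRowStore(
    const paddle::CpuMemHandlePtr& dataHandle, size_t width) {
  std::unique_ptr<paddle::RowBuffer> buf(new paddle::RowBuffer(dataHandle, width));
  // buf->isAutoGrowth() is true exactly when dataHandle is null.
  return buf;
}
```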
paddle/math/tests/CMakeLists.txt

@@ -4,6 +4,7 @@ add_simple_unittest(test_ExecViaCpu)
 add_simple_unittest(test_SIMDFunctions)
 add_simple_unittest(test_TrainingAlgorithm)
 add_simple_unittest(test_SparseMatrix)
+add_simple_unittest(test_RowBuffer)

 # TODO(yuyang18): Refactor TestUtil.cpp. Remove this cross module reference.
 add_unittest(test_matrixCompare
paddle/math/tests/test_RowBuffer.cpp (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include "paddle/math/RowBuffer.h"

TEST(RowBuffer, testAutoGrow) {
  paddle::RowBuffer buf(128);
  ASSERT_EQ(128, buf.getWidth());
  ASSERT_TRUE(buf.isAutoGrowth());
  buf.resize(2);
  ASSERT_EQ(2, buf.getRowCount());
  for (size_t i = 0; i < buf.getWidth() * 2; ++i) {
    buf.data()[i] = i;
  }
  for (size_t i = 0; i < buf.getRowCount(); ++i) {
    for (size_t j = 0; j < buf.getWidth(); ++j) {
      ASSERT_NEAR(i * buf.getWidth() + j, buf.get(i)[j], 1e-5);
    }
  }

  auto data = buf.getWithAutoGrowth(2);
  for (size_t i = 0; i < buf.getWidth(); ++i) {
    data[i] = i;
  }

  ASSERT_EQ(3, buf.getRowCount());
  for (size_t i = 0; i < buf.getRowCount() - 1; ++i) {
    for (size_t j = 0; j < buf.getWidth(); ++j) {
      ASSERT_NEAR(i * buf.getWidth() + j, buf.get(i)[j], 1e-5);
    }
  }
  for (size_t i = 0; i < buf.getWidth(); ++i) {
    ASSERT_NEAR(i, buf.get(2)[i], 1e-5);
  }
}

TEST(RowBuffer, testWithMemBuf) {
  paddle::CpuMemHandlePtr mem =
      std::make_shared<paddle::CpuMemoryHandle>(128 * 2 * sizeof(real));
  paddle::RowBuffer buf(mem, 128);
  ASSERT_TRUE(!buf.isAutoGrowth());
  ASSERT_EQ(2, buf.getRowCount());
  for (size_t i = 0; i < buf.getWidth() * 2; ++i) {
    buf.data()[i] = i;
  }
  for (size_t i = 0; i < buf.getRowCount(); ++i) {
    for (size_t j = 0; j < buf.getWidth(); ++j) {
      ASSERT_NEAR(i * buf.getWidth() + j, buf.getWithAutoGrowth(i)[j], 1e-5);
    }
  }

  ASSERT_DEATH_IF_SUPPORTED(buf.getWithAutoGrowth(3), ".*");
}
paddle/scripts/docker/Dockerfile

@@ -4,28 +4,32 @@ MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com>
 ARG DEBIAN_FRONTEND=noninteractive
 ARG UBUNTU_MIRROR
 RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'

-RUN apt-get update \
-    && apt-get install -y cmake libprotobuf-dev protobuf-compiler git \
-        libgoogle-glog-dev libgflags-dev libgtest-dev \
-        libatlas-dev libatlas3-base g++ m4 python-pip \
-        python-protobuf python-numpy python-dev swig openssh-server \
-        wget unzip python-matplotlib tar xz-utils bzip2 gzip coreutils \
-        sed grep graphviz libjpeg-dev zlib1g-dev doxygen \
-        clang-3.8 llvm-3.8 libclang-3.8-dev \
-    && apt-get clean -y
-
-RUN cd /usr/src/gtest && cmake . && make && cp *.a /usr/lib
-RUN pip install -U BeautifulSoup docopt PyYAML pillow \
-    sphinx sphinx_rtd_theme recommonmark jupyter
+RUN apt-get update && \
+    apt-get install -y git python-pip python-dev openssh-server bison && \
+    apt-get install -y wget unzip tar xz-utils bzip2 gzip coreutils && \
+    apt-get install -y curl sed grep graphviz libjpeg-dev zlib1g-dev && \
+    apt-get install -y python-numpy python-matplotlib gcc g++ gfortran && \
+    apt-get install -y automake clang-3.8 llvm-3.8 libclang-3.8-dev && \
+    apt-get clean -y
+
+RUN pip install --upgrade pip && \
+    pip install 'protobuf==3.1.0.post1' && \
+    pip install -U wheel pillow BeautifulSoup && \
+    pip install -U docopt PyYAML sphinx && \
+    pip install -U sphinx_rtd_theme recommonmark jupyter
+
+RUN curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \
+    cd cmake-3.4.1 && ./bootstrap && make -j4 && make install && \
+    cd .. && rm -rf cmake-3.4.1

 ARG WITH_AVX
 ARG WITH_DOC
 ARG WITH_SWIG_PY
 ARG WITH_STYLE_CHECK

 ENV WITH_GPU=OFF
 ENV WITH_AVX=${WITH_AVX:-ON}
 ENV WITH_DOC=${WITH_DOC:-ON}
 ENV WITH_SWIG_PY=${WITH_SWIG_PY:-ON}
 ENV WITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF}

 RUN mkdir /paddle
paddle/scripts/docker/Dockerfile.gpu

@@ -4,28 +4,32 @@ MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com>
 ARG DEBIAN_FRONTEND=noninteractive
 ARG UBUNTU_MIRROR
 RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'

-RUN apt-get update \
-    && apt-get install -y cmake libprotobuf-dev protobuf-compiler git \
-        libgoogle-glog-dev libgflags-dev libgtest-dev \
-        libatlas-dev libatlas3-base g++ m4 python-pip \
-        python-protobuf python-numpy python-dev swig openssh-server \
-        wget unzip python-matplotlib tar xz-utils bzip2 gzip coreutils \
-        sed grep graphviz libjpeg-dev zlib1g-dev doxygen \
-        clang-3.8 llvm-3.8 libclang-3.8-dev \
-    && apt-get clean -y
-
-RUN cd /usr/src/gtest && cmake . && make && cp *.a /usr/lib
-RUN pip install -U BeautifulSoup docopt PyYAML pillow \
-    sphinx sphinx_rtd_theme recommonmark jupyter
+RUN apt-get update && \
+    apt-get install -y git python-pip python-dev openssh-server bison && \
+    apt-get install -y wget unzip tar xz-utils bzip2 gzip coreutils && \
+    apt-get install -y curl sed grep graphviz libjpeg-dev zlib1g-dev && \
+    apt-get install -y python-numpy python-matplotlib gcc g++ gfortran && \
+    apt-get install -y automake clang-3.8 llvm-3.8 libclang-3.8-dev && \
+    apt-get clean -y
+
+RUN pip install --upgrade pip && \
+    pip install 'protobuf==3.1.0.post1' && \
+    pip install -U wheel pillow BeautifulSoup && \
+    pip install -U docopt PyYAML sphinx && \
+    pip install -U sphinx_rtd_theme recommonmark jupyter
+
+RUN curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \
+    cd cmake-3.4.1 && ./bootstrap && make -j4 && make install && \
+    cd .. && rm -rf cmake-3.4.1

 ARG WITH_AVX
 ARG WITH_DOC
 ARG WITH_SWIG_PY
 ARG WITH_STYLE_CHECK

 ENV WITH_GPU=ON
 ENV WITH_AVX=${WITH_AVX:-ON}
 ENV WITH_DOC=${WITH_DOC:-ON}
 ENV WITH_SWIG_PY=${WITH_SWIG_PY:-ON}
 ENV WITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF}

 RUN mkdir /paddle