diff --git a/cmake/FindSphinx.cmake b/cmake/FindSphinx.cmake
index d319442ef10b38b9edf5844e5540a92c7094c7ce..1c29cb22a31f1e41a6b5575837c6374175cfdea5 100644
--- a/cmake/FindSphinx.cmake
+++ b/cmake/FindSphinx.cmake
@@ -72,7 +72,7 @@ function( Sphinx_add_target target_name builder conf cache source destination )
${source}
${destination}
COMMENT "Generating sphinx documentation: ${builder}"
- COMMAND ln -sf ${destination}/index_*.html ${destination}/index.html
+ COMMAND cd ${destination} && ln -s ./index_*.html index.html
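+ # running ln from inside ${destination} makes the symlink target relative,
+ # so the link stays valid when the generated html tree is copied elsewhere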
)
set_property(
diff --git a/doc/api/index_cn.rst b/doc/api/index_cn.rst
index 3718cd73a2003b8ef6c406a9bd51dc68e76402dc..874dd9cb2278ce36a029b8745f2d82a7e642128e 100644
--- a/doc/api/index_cn.rst
+++ b/doc/api/index_cn.rst
@@ -1,37 +1,2 @@
-API中文手册
-============
-
-DataProvider API
-----------------
-
-.. toctree::
- :maxdepth: 1
-
- data_provider/dataprovider_cn.rst
- data_provider/pydataprovider2_cn.rst
-
-.. _api_trainer_config:
-
-Model Config API
-----------------
-
-.. toctree::
- :maxdepth: 1
-
- trainer_config_helpers/optimizers.rst
- trainer_config_helpers/data_sources.rst
- trainer_config_helpers/layers.rst
- trainer_config_helpers/activations.rst
- trainer_config_helpers/poolings.rst
- trainer_config_helpers/networks.rst
- trainer_config_helpers/evaluators.rst
- trainer_config_helpers/attrs.rst
-
-
-Applications API
-----------------
-
-.. toctree::
- :maxdepth: 1
-
- predict/swig_py_paddle_cn.rst
+API
+===
\ No newline at end of file
diff --git a/doc/api/index_en.rst b/doc/api/index_en.rst
index 10c297a71d6988c002de868e804ed9ee2345fbd7..b7f470e1f8a9a1c720e7d70832ec069339ddc60f 100644
--- a/doc/api/index_en.rst
+++ b/doc/api/index_en.rst
@@ -1,37 +1,10 @@
API
===
-DataProvider API
-----------------
-
-.. toctree::
- :maxdepth: 1
-
- data_provider/dataprovider_en.rst
- data_provider/pydataprovider2_en.rst
-
-.. _api_trainer_config:
-
Model Config API
----------------
.. toctree::
:maxdepth: 1
- trainer_config_helpers/optimizers.rst
- trainer_config_helpers/data_sources.rst
- trainer_config_helpers/layers.rst
- trainer_config_helpers/activations.rst
- trainer_config_helpers/poolings.rst
- trainer_config_helpers/networks.rst
- trainer_config_helpers/evaluators.rst
- trainer_config_helpers/attrs.rst
-
-
-Applications API
-----------------
-
-.. toctree::
- :maxdepth: 1
-
- predict/swig_py_paddle_en.rst
+ v2/model_configs.rst
\ No newline at end of file
diff --git a/doc/api/data_provider/dataprovider_cn.rst b/doc/api/v1/data_provider/dataprovider_cn.rst
similarity index 100%
rename from doc/api/data_provider/dataprovider_cn.rst
rename to doc/api/v1/data_provider/dataprovider_cn.rst
diff --git a/doc/api/data_provider/dataprovider_en.rst b/doc/api/v1/data_provider/dataprovider_en.rst
similarity index 100%
rename from doc/api/data_provider/dataprovider_en.rst
rename to doc/api/v1/data_provider/dataprovider_en.rst
diff --git a/doc/api/data_provider/pydataprovider2_cn.rst b/doc/api/v1/data_provider/pydataprovider2_cn.rst
similarity index 100%
rename from doc/api/data_provider/pydataprovider2_cn.rst
rename to doc/api/v1/data_provider/pydataprovider2_cn.rst
diff --git a/doc/api/data_provider/pydataprovider2_en.rst b/doc/api/v1/data_provider/pydataprovider2_en.rst
similarity index 100%
rename from doc/api/data_provider/pydataprovider2_en.rst
rename to doc/api/v1/data_provider/pydataprovider2_en.rst
diff --git a/doc/api/data_provider/src/mnist_config.py b/doc/api/v1/data_provider/src/mnist_config.py
similarity index 100%
rename from doc/api/data_provider/src/mnist_config.py
rename to doc/api/v1/data_provider/src/mnist_config.py
diff --git a/doc/api/data_provider/src/mnist_provider.dict.py b/doc/api/v1/data_provider/src/mnist_provider.dict.py
similarity index 100%
rename from doc/api/data_provider/src/mnist_provider.dict.py
rename to doc/api/v1/data_provider/src/mnist_provider.dict.py
diff --git a/doc/api/data_provider/src/mnist_train.txt b/doc/api/v1/data_provider/src/mnist_train.txt
similarity index 100%
rename from doc/api/data_provider/src/mnist_train.txt
rename to doc/api/v1/data_provider/src/mnist_train.txt
diff --git a/doc/api/data_provider/src/sentimental_config.py b/doc/api/v1/data_provider/src/sentimental_config.py
similarity index 100%
rename from doc/api/data_provider/src/sentimental_config.py
rename to doc/api/v1/data_provider/src/sentimental_config.py
diff --git a/doc/api/data_provider/src/sentimental_provider.py b/doc/api/v1/data_provider/src/sentimental_provider.py
similarity index 100%
rename from doc/api/data_provider/src/sentimental_provider.py
rename to doc/api/v1/data_provider/src/sentimental_provider.py
diff --git a/doc/api/data_provider/src/sentimental_train.txt b/doc/api/v1/data_provider/src/sentimental_train.txt
similarity index 100%
rename from doc/api/data_provider/src/sentimental_train.txt
rename to doc/api/v1/data_provider/src/sentimental_train.txt
diff --git a/doc/api/data_provider/src/train.list b/doc/api/v1/data_provider/src/train.list
similarity index 100%
rename from doc/api/data_provider/src/train.list
rename to doc/api/v1/data_provider/src/train.list
diff --git a/doc/api/v1/index_cn.rst b/doc/api/v1/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3718cd73a2003b8ef6c406a9bd51dc68e76402dc
--- /dev/null
+++ b/doc/api/v1/index_cn.rst
@@ -0,0 +1,37 @@
+API中文手册
+============
+
+DataProvider API
+----------------
+
+.. toctree::
+ :maxdepth: 1
+
+ data_provider/dataprovider_cn.rst
+ data_provider/pydataprovider2_cn.rst
+
+.. _api_trainer_config:
+
+Model Config API
+----------------
+
+.. toctree::
+ :maxdepth: 1
+
+ trainer_config_helpers/optimizers.rst
+ trainer_config_helpers/data_sources.rst
+ trainer_config_helpers/layers.rst
+ trainer_config_helpers/activations.rst
+ trainer_config_helpers/poolings.rst
+ trainer_config_helpers/networks.rst
+ trainer_config_helpers/evaluators.rst
+ trainer_config_helpers/attrs.rst
+
+
+Applications API
+----------------
+
+.. toctree::
+ :maxdepth: 1
+
+ predict/swig_py_paddle_cn.rst
diff --git a/doc/api/v1/index_en.rst b/doc/api/v1/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..10c297a71d6988c002de868e804ed9ee2345fbd7
--- /dev/null
+++ b/doc/api/v1/index_en.rst
@@ -0,0 +1,37 @@
+API
+===
+
+DataProvider API
+----------------
+
+.. toctree::
+ :maxdepth: 1
+
+ data_provider/dataprovider_en.rst
+ data_provider/pydataprovider2_en.rst
+
+.. _api_trainer_config:
+
+Model Config API
+----------------
+
+.. toctree::
+ :maxdepth: 1
+
+ trainer_config_helpers/optimizers.rst
+ trainer_config_helpers/data_sources.rst
+ trainer_config_helpers/layers.rst
+ trainer_config_helpers/activations.rst
+ trainer_config_helpers/poolings.rst
+ trainer_config_helpers/networks.rst
+ trainer_config_helpers/evaluators.rst
+ trainer_config_helpers/attrs.rst
+
+
+Applications API
+----------------
+
+.. toctree::
+ :maxdepth: 1
+
+ predict/swig_py_paddle_en.rst
diff --git a/doc/api/predict/src/predict_sample.py b/doc/api/v1/predict/src/predict_sample.py
similarity index 100%
rename from doc/api/predict/src/predict_sample.py
rename to doc/api/v1/predict/src/predict_sample.py
diff --git a/doc/api/predict/swig_py_paddle_cn.rst b/doc/api/v1/predict/swig_py_paddle_cn.rst
similarity index 100%
rename from doc/api/predict/swig_py_paddle_cn.rst
rename to doc/api/v1/predict/swig_py_paddle_cn.rst
diff --git a/doc/api/predict/swig_py_paddle_en.rst b/doc/api/v1/predict/swig_py_paddle_en.rst
similarity index 100%
rename from doc/api/predict/swig_py_paddle_en.rst
rename to doc/api/v1/predict/swig_py_paddle_en.rst
diff --git a/doc/api/trainer_config_helpers/activations.rst b/doc/api/v1/trainer_config_helpers/activations.rst
similarity index 100%
rename from doc/api/trainer_config_helpers/activations.rst
rename to doc/api/v1/trainer_config_helpers/activations.rst
diff --git a/doc/api/trainer_config_helpers/attrs.rst b/doc/api/v1/trainer_config_helpers/attrs.rst
similarity index 100%
rename from doc/api/trainer_config_helpers/attrs.rst
rename to doc/api/v1/trainer_config_helpers/attrs.rst
diff --git a/doc/api/trainer_config_helpers/data_sources.rst b/doc/api/v1/trainer_config_helpers/data_sources.rst
similarity index 100%
rename from doc/api/trainer_config_helpers/data_sources.rst
rename to doc/api/v1/trainer_config_helpers/data_sources.rst
diff --git a/doc/api/trainer_config_helpers/evaluators.rst b/doc/api/v1/trainer_config_helpers/evaluators.rst
similarity index 100%
rename from doc/api/trainer_config_helpers/evaluators.rst
rename to doc/api/v1/trainer_config_helpers/evaluators.rst
diff --git a/doc/api/trainer_config_helpers/layers.rst b/doc/api/v1/trainer_config_helpers/layers.rst
similarity index 100%
rename from doc/api/trainer_config_helpers/layers.rst
rename to doc/api/v1/trainer_config_helpers/layers.rst
diff --git a/doc/api/trainer_config_helpers/networks.rst b/doc/api/v1/trainer_config_helpers/networks.rst
similarity index 100%
rename from doc/api/trainer_config_helpers/networks.rst
rename to doc/api/v1/trainer_config_helpers/networks.rst
diff --git a/doc/api/trainer_config_helpers/optimizers.rst b/doc/api/v1/trainer_config_helpers/optimizers.rst
similarity index 100%
rename from doc/api/trainer_config_helpers/optimizers.rst
rename to doc/api/v1/trainer_config_helpers/optimizers.rst
diff --git a/doc/api/trainer_config_helpers/poolings.rst b/doc/api/v1/trainer_config_helpers/poolings.rst
similarity index 100%
rename from doc/api/trainer_config_helpers/poolings.rst
rename to doc/api/v1/trainer_config_helpers/poolings.rst
diff --git a/doc/api/v2/model_configs.rst b/doc/api/v2/model_configs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a9f33b33ef61bf846013364672ec26ae075d0300
--- /dev/null
+++ b/doc/api/v2/model_configs.rst
@@ -0,0 +1,6 @@
+======
+Layers
+======
+
+.. automodule:: paddle.v2.layer
+ :members:
diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in
index 418d718fbd9c61bff3acb9c2dab0638c0b650bab..6dc48704bc230bd1a573c4b4b2e7c07791e48ced 100644
--- a/doc/templates/conf.py.cn.in
+++ b/doc/templates/conf.py.cn.in
@@ -15,13 +15,19 @@ import sys
import os, subprocess
import shlex
from recommonmark import parser, transform
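+# sphinx autodoc imports paddle.v2 modules at build time, so the docs can only
+# be built against an installed paddle package, not the bare source tree
+# (docs.sh installs the built wheel before running sphinx).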
+try:
+ import py_paddle
+ import paddle
+ import paddle.v2
+except ImportError:
+ print("Must install paddle python package before generating documentation")
+ sys.exit(1)
MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, '@PROJ_ROOT@/python')
templates_path = ["@PROJ_ROOT@/doc_theme/templates"]
# -- General configuration ------------------------------------------------
diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in
index e96c25cb75bee20d2e2949423d80ddab1d3450a1..b477f0120c4fa0544012080b7cfb8572d3c44b04 100644
--- a/doc/templates/conf.py.en.in
+++ b/doc/templates/conf.py.en.in
@@ -15,14 +15,20 @@ import sys
import os, subprocess
import shlex
from recommonmark import parser, transform
+try:
+ import py_paddle
+ import paddle
+ import paddle.v2
+except ImportError:
+ print("Must install paddle python package before generating documentation")
+ sys.exit(1)
+
MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, '@PROJ_ROOT@/python')
-
templates_path = ["@PROJ_ROOT@/doc_theme/templates"]
# -- General configuration ------------------------------------------------
diff --git a/doc/tutorials/quick_start/index_en.md b/doc/tutorials/quick_start/index_en.md
index 70dec2eb2a8c397bc56b1e6f52a624a3a6877905..ca110431cf921ae0480d3fb2b17c58f90a84cc0e 100644
--- a/doc/tutorials/quick_start/index_en.md
+++ b/doc/tutorials/quick_start/index_en.md
@@ -156,14 +156,14 @@ define_py_data_sources2(train_list='data/train.list',
obj="process",
args={"dictionary": word_dict})
```
-You can refer to the following link for more detailed examples and data formats: PyDataProvider2.
+You can refer to the following link for more detailed examples and data formats: PyDataProvider2.
## Network Architecture
We will describe four kinds of network architectures in this section.
![](./src/PipelineNetwork_en.jpg)
First, you will build a logistic regression model. Later, you will also get chance to build other more powerful network architectures.
-For more detailed documentation, you could refer to: layer documentation. All configuration files are in `demo/quick_start` directory.
+For more detailed documentation, you could refer to: layer documentation. All configuration files are in `demo/quick_start` directory.
### Logistic Regression
The architecture is illustrated in the following picture:
@@ -366,7 +366,7 @@ You can use single layer LSTM model with Dropout for our text classification pro
## Optimization Algorithm
-Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been proved to work very well for training recurrent neural network.
+Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use the Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been shown to work very well for training recurrent neural networks.
```python
settings(batch_size=128,
@@ -407,7 +407,7 @@ paddle train \
--init_model_path=./output/pass-0000x
```
-We will give an example of performing prediction using Recurrent model on a dataset with no labels. You can refer to Python Prediction API tutorial,or other demo for the prediction process using Python. You can also use the following script for inference or evaluation.
+We will give an example of performing prediction using the Recurrent model on a dataset with no labels. You can refer to the Python Prediction API tutorial, or other demos, for the prediction process using Python. You can also use the following script for inference or evaluation.
inference script (predict.sh):
diff --git a/paddle/api/Arguments.cpp b/paddle/api/Arguments.cpp
index a3f4bfffc9f074900ebcc52876c04bbfc0e570b2..d49b189e253f7a0792fe3f1fe7c8fdbb7071acd4 100644
--- a/paddle/api/Arguments.cpp
+++ b/paddle/api/Arguments.cpp
@@ -144,9 +144,7 @@ void Arguments::setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError) {
a.cpuSequenceDims = m->cast(vec->getSharedPtr());
}
-float Arguments::sumCosts() const {
- return paddle::Argument::sumCosts(m->outputs);
-}
+float Arguments::sum() const { return paddle::Argument::sum(m->outputs); }
int64_t Arguments::getBatchSize(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h
index 762f86ac79461558b6a2eb7105ffd05961f5d3e2..c4f5dca26cc6a5e9fdd23ee27b594ced29a25c7a 100644
--- a/paddle/api/PaddleAPI.h
+++ b/paddle/api/PaddleAPI.h
@@ -453,7 +453,7 @@ public:
IVector* vec) throw(RangeError);
void setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError);
- float sumCosts() const;
+ float sum() const;
private:
static Arguments* createByPaddleArgumentVector(void* ptr);
diff --git a/paddle/api/test/testArguments.py b/paddle/api/test/testArguments.py
index a04a805d7a64ef906c8388f1241b9ef823e4d9e0..9fe44de94ea6ddb71d2dfbb2243fc86ede0d0531 100644
--- a/paddle/api/test/testArguments.py
+++ b/paddle/api/test/testArguments.py
@@ -22,7 +22,7 @@ class TestArguments(unittest.TestCase):
args = swig_paddle.Arguments.createArguments(1)
args.setSlotValue(0, m)
- self.assertAlmostEqual(27.0, args.sumCosts())
+ self.assertAlmostEqual(27.0, args.sum())
mat = args.getSlotValue(0)
assert isinstance(mat, swig_paddle.Matrix)
diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp
index ae016e74eaa84f7c43a30c09c8c4577e25360c4e..7617af10ba719490d1b33dd297b070cd8c7c292c 100644
--- a/paddle/gserver/tests/LayerGradUtil.cpp
+++ b/paddle/gserver/tests/LayerGradUtil.cpp
@@ -24,7 +24,7 @@ real getCostSum(LayerPtr& testLayer, MatrixPtr weights) {
if (weights) {
outArgs[0].value->dotMul(*outArgs[0].value, *weights);
}
- return Argument::sumCosts(outArgs);
+ return Argument::sum(outArgs);
}
real getDiffAndPrint(real newCost1,
@@ -241,7 +241,7 @@ void testBatchState(LayerPtr testLayer,
std::vector<Argument> args;
args.push_back(out);
- EXPECT_EQ(0, Argument::sumCosts(args)) << "testBatchState failed";
+ EXPECT_EQ(0, Argument::sum(args)) << "testBatchState failed";
for (size_t seqId = 0; seqId < numSequences; ++seqId) {
start[seqId] += seqLens[seqId];
}
@@ -672,7 +672,7 @@ void testLayerGradKernel(TestConfig testConf,
outArgs[0].value->dotMul(*testLayer->getOutput().value, *weights);
}
- real cost = Argument::sumCosts(outArgs);
+ real cost = Argument::sum(outArgs);
LOG(INFO) << " cost " << cost;
EXPECT_FALSE(std::isnan(cost));
diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h
index 178c068b93ac5fc1e06200984f14da86069cf7e4..9ef44be0cb3b960db1e789f3f26bb66d1fe63c81 100644
--- a/paddle/parameter/Argument.h
+++ b/paddle/parameter/Argument.h
@@ -163,7 +163,7 @@ struct Argument {
: sequenceStartPositions->getData(false);
}
- static inline real sumCosts(const std::vector<Argument>& arguments) {
+ static inline real sum(const std::vector<Argument>& arguments) {
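+ // sums the value matrix of every output argument into a single scalar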
real cost = 0;
for (auto& arg : arguments) {
if (arg.value) {
diff --git a/paddle/scripts/travis/docs.sh b/paddle/scripts/travis/docs.sh
index 6b43cad20b76e9abeb3cb10a726d3d8e3da5f8e2..53e998ef6c1b96d9e7d82b7effd12a27e6dc69f2 100755
--- a/paddle/scripts/travis/docs.sh
+++ b/paddle/scripts/travis/docs.sh
@@ -2,8 +2,12 @@
# Add set -e, cd to directory.
source ./common.sh
-
# Compile Documentation only.
+cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_STYLE_CHECK=OFF ${EXTRA_CMAKE_OPTS}
+mkdir output
+make DESTDIR=./output install -j `nproc`
+pip install ./output/usr/local/opt/paddle/share/wheels/*
+rm -rf *
cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DWITH_GPU=OFF -DWITH_DOC=ON ${EXTRA_CMAKE_OPTS}
make paddle_docs paddle_docs_cn
@@ -25,26 +29,41 @@ TARGET_BRANCH="gh-pages"
# Only deploy master branch to build latest documentation.
SOURCE_BRANCH="master"
-# If is not a Github pull request, and in master branch.
-if [ "$TRAVIS_PULL_REQUEST" != "false" -o "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then
- exit 0
-fi
-
# Clone the repo to output directory
git clone $REPO output
cd output
-# checkout github page branch
-git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH
+function deploy_docs() {
+ SOURCE_BRANCH=$1
+ DIR=$2
+ # If this is not a GitHub pull request
+ if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
+ exit 0
+ fi
+ # If it is not the watched branch.
+ if [ "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then
+ return
+ fi
-# remove old docs. mv new docs.
-rm -rf doc doc_cn
-mv ../doc/cn/html doc_cn
-mv ../doc/en/html doc
+ # checkout github page branch
+ git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH
+
+ mkdir -p ${DIR}
+ # remove old docs. mv new docs.
+ set +e
+ rm -rf ${DIR}/doc ${DIR}/doc_cn
+ set -e
+ mv ../doc/cn/html ${DIR}/doc_cn
+ mv ../doc/en/html ${DIR}/doc
+ git add .
+}
+
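+# master is published at the site root; develop lands under /develop/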
+deploy_docs "master" "."
+deploy_docs "develop" "./develop/"
# Check is there anything changed.
set +e
-git diff --exit-code >/dev/null
+git diff --cached --exit-code >/dev/null
if [ $? -eq 0 ]; then
echo "No changes to the output on this push; exiting."
exit 0
@@ -57,7 +76,6 @@ if [ -n $SSL_KEY ]; then # Only push updated docs for github.com/PaddlePaddle/P
git config user.name "Travis CI"
git config user.email "paddle-dev@baidu.com"
git commit -m "Deploy to GitHub Pages: ${SHA}"
-
# Set ssh private key
openssl aes-256-cbc -K $SSL_KEY -iv $SSL_IV -in ../../paddle/scripts/travis/deploy_key.enc -out deploy_key -d
chmod 600 deploy_key
diff --git a/paddle/trainer/Tester.cpp b/paddle/trainer/Tester.cpp
index 13aa28ae5d9699d267858d48e46797c756487ddd..80664fa877b324af73e3e3effa11e46eac6294e2 100644
--- a/paddle/trainer/Tester.cpp
+++ b/paddle/trainer/Tester.cpp
@@ -208,7 +208,7 @@ real Tester::forwardOneBatch(const DataBatch& dataBatch,
return 0.0; // In this case, there is no meaning to calculate cost
}
- return Argument::sumCosts(outArgs);
+ return Argument::sum(outArgs);
}
void Tester::testOnePassBatch(int passId) {
diff --git a/paddle/trainer/Trainer.cpp b/paddle/trainer/Trainer.cpp
index bd84545375117b178d4324f0ad03f5bc35ae925d..b68e29cd5ea223272151e7a8b52d998832f47103 100644
--- a/paddle/trainer/Trainer.cpp
+++ b/paddle/trainer/Trainer.cpp
@@ -310,7 +310,7 @@ real Trainer::checkGradient() {
std::vector<Argument> outArgs;
trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
- real cost = Argument::sumCosts(outArgs);
+ real cost = Argument::sum(outArgs);
LOG(INFO) << "original cost=" << cost;
trainerInternal_.getGradientMachine()->backward();
@@ -340,7 +340,7 @@ real Trainer::checkGradient() {
parameter->getBuf(PARAMETER_VALUE)->copyFrom(newPara);
parameter->setValueUpdated();
trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
- real newCost1 = Argument::sumCosts(outArgs);
+ real newCost1 = Argument::sum(outArgs);
for (size_t i = 0; i < dim; ++i) {
newp[i] = oldp[i] - step * d[i];
@@ -349,7 +349,7 @@ real Trainer::checkGradient() {
parameter->getBuf(PARAMETER_VALUE)->copyFrom(newPara);
parameter->setValueUpdated();
trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
- real newCost2 = Argument::sumCosts(outArgs);
+ real newCost2 = Argument::sum(outArgs);
real trueDelta = 0.5 * (newCost1 - newCost2);
real diff = (1e-20 + trueDelta) / (1e-20 + delta) - 1;
@@ -575,7 +575,7 @@ real Trainer::calcGradient(const DataBatch& dataBatch,
trainerInternal_.getGradientMachine()->forwardBackward(
inArgs, &outArgs, PASS_TRAIN);
- real cost = Argument::sumCosts(outArgs);
+ real cost = Argument::sum(outArgs);
offset = 0;
for (auto& para : parameters) {
diff --git a/paddle/trainer/TrainerInternal.cpp b/paddle/trainer/TrainerInternal.cpp
index f3b465b444167d4624a5e99c30e1257eda53ca2c..4c5d4a0913aaf3a9932b3d67806378ece4245304 100644
--- a/paddle/trainer/TrainerInternal.cpp
+++ b/paddle/trainer/TrainerInternal.cpp
@@ -134,7 +134,7 @@ void TrainerInternal::trainOneBatch(int64_t batchId,
real cost = 0;
{
REGISTER_TIMER("sumCost");
- cost = Argument::sumCosts(*outArgs);
+ cost = Argument::sum(*outArgs);
}
if (batchId % intconfig_->log_period == 0) {
diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py
index bd24c68b6fe88eab03c814f8cac70db3880316f4..4e3c4db853205bb12272e86295784a6069483ffe 100644
--- a/python/paddle/trainer/PyDataProvider2.py
+++ b/python/paddle/trainer/PyDataProvider2.py
@@ -65,14 +65,18 @@ def sparse_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
return InputType(dim, seq_type, DataType.SparseValue)
-def index_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
- return InputType(dim, seq_type, DataType.Index)
+def index_slot(value_range, seq_type=SequenceType.NO_SEQUENCE):
+ """Data type of integer.
+ :param value_range: range of this integer.
+ """
+ return InputType(value_range, seq_type, DataType.Index)
dense_vector = dense_slot
sparse_binary_vector = sparse_non_value_slot
sparse_vector = sparse_value_slot
integer_value = index_slot
+integer_value.__doc__ = index_slot.__doc__
def dense_vector_sequence(dim):
@@ -99,8 +103,11 @@ def sparse_vector_sub_sequence(dim):
return sparse_vector(dim, seq_type=SequenceType.SUB_SEQUENCE)
-def integer_value_sequence(dim):
- return integer_value(dim, seq_type=SequenceType.SEQUENCE)
+def integer_value_sequence(value_range):
+ """Data type of a sequence of integer.
+ :param value_range: range of each element.
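+
+ For example, ``integer_value_sequence(len(word_dict))`` declares a sequence
+ of word ids, each falling in [0, len(word_dict)).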
+ """
+ return integer_value(value_range, seq_type=SequenceType.SEQUENCE)
def integer_value_sub_sequence(dim):
@@ -108,6 +115,7 @@ def integer_value_sub_sequence(dim):
integer_sequence = integer_value_sequence
+integer_sequence.__doc__ = integer_value_sequence.__doc__
class SingleSlotWrapper(object):
diff --git a/python/paddle/v2/__init__.py b/python/paddle/v2/__init__.py
index 05227ca334594711763f9e1dd41709a212da547a..0ec7e8a4543421caa25a1e120b70a8c24283256b 100644
--- a/python/paddle/v2/__init__.py
+++ b/python/paddle/v2/__init__.py
@@ -25,7 +25,7 @@ from . import dataset
from . import reader
import attr
import pooling
-import inferencer
+import inference
import networks
import py_paddle.swig_paddle as api
import minibatch
@@ -33,7 +33,7 @@ import minibatch
__all__ = [
'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader',
- 'topology', 'networks', 'inferencer', 'infer'
+ 'topology', 'networks', 'infer'
]
@@ -44,6 +44,5 @@ def init(**kwargs):
api.initPaddle(*args)
-
-infer = inferencer.infer
+infer = inference.infer
-batch = minibatch.batch
+batch = minibatch.batch
\ No newline at end of file
diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py
index 82f11a7c41149c2231130dc7c2205debb643aa89..6c371d3c9bdee94a91b9a48ff7c4a006c8d7eb21 100644
--- a/python/paddle/v2/dataset/__init__.py
+++ b/python/paddle/v2/dataset/__init__.py
@@ -20,8 +20,9 @@ import movielens
import conll05
import uci_housing
import sentiment
+import wmt14
__all__ = [
-    'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment'
-    'uci_housing'
+    'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment',
+    'uci_housing', 'wmt14'
]
diff --git a/python/paddle/v2/dataset/imikolov.py b/python/paddle/v2/dataset/imikolov.py
index 285d3eaca8317c78dc84e99b4d524a0f4872c687..deb556942d9b0490ffab8cef90aae8f365652129 100644
--- a/python/paddle/v2/dataset/imikolov.py
+++ b/python/paddle/v2/dataset/imikolov.py
@@ -17,7 +17,7 @@ imikolov's simple dataset: http://www.fit.vutbr.cz/~imikolov/rnnlm/
import paddle.v2.dataset.common
import tarfile
-__all__ = ['train', 'test']
+__all__ = ['train', 'test', 'build_dict']
URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz'
MD5 = '30177ea32e27c525793142b6bf2c8e2d'
@@ -37,7 +37,9 @@ def word_count(f, word_freq=None):
return word_freq
-def build_dict(train_filename, test_filename):
+def build_dict():
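+ # the PTB train/valid files sit at fixed paths inside the downloaded tarball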
+ train_filename = './simple-examples/data/ptb.train.txt'
+ test_filename = './simple-examples/data/ptb.valid.txt'
with tarfile.open(
paddle.v2.dataset.common.download(
paddle.v2.dataset.imikolov.URL, 'imikolov',
@@ -45,27 +47,22 @@ def build_dict(train_filename, test_filename):
trainf = tf.extractfile(train_filename)
testf = tf.extractfile(test_filename)
word_freq = word_count(testf, word_count(trainf))
+ if '<unk>' in word_freq:
+ # remove <unk> for now, since we will set it as last index
+ del word_freq['<unk>']
TYPO_FREQ = 50
word_freq = filter(lambda x: x[1] > TYPO_FREQ, word_freq.items())
- dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0]))
- words, _ = list(zip(*dictionary))
+ word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
+ words, _ = list(zip(*word_freq_sorted))
word_idx = dict(zip(words, xrange(len(words))))
+ word_idx['<unk>'] = len(words)
return word_idx
-word_idx = {}
-
-
-def reader_creator(filename, n):
- global word_idx
- if len(word_idx) == 0:
- word_idx = build_dict('./simple-examples/data/ptb.train.txt',
- './simple-examples/data/ptb.valid.txt')
-
+def reader_creator(filename, word_idx, n):
def reader():
with tarfile.open(
paddle.v2.dataset.common.download(
@@ -84,9 +81,9 @@ def reader_creator(filename, n):
return reader
-def train(n):
- return reader_creator('./simple-examples/data/ptb.train.txt', n)
+def train(word_idx, n):
+ return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n)
-def test(n):
- return reader_creator('./simple-examples/data/ptb.valid.txt', n)
+def test(word_idx, n):
+ return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n)
diff --git a/python/paddle/v2/dataset/tests/imikolov_test.py b/python/paddle/v2/dataset/tests/imikolov_test.py
index 9b1748eaaa7f913a6b94f2087a8089fb998570aa..009e55243a594e5e235c36fb0223ec70754d17f3 100644
--- a/python/paddle/v2/dataset/tests/imikolov_test.py
+++ b/python/paddle/v2/dataset/tests/imikolov_test.py
@@ -1,6 +1,8 @@
import paddle.v2.dataset.imikolov
import unittest
+WORD_DICT = paddle.v2.dataset.imikolov.build_dict()
+
class TestMikolov(unittest.TestCase):
def check_reader(self, reader, n):
@@ -9,11 +11,15 @@ class TestMikolov(unittest.TestCase):
def test_train(self):
n = 5
- self.check_reader(paddle.v2.dataset.imikolov.train(n), n)
+ self.check_reader(paddle.v2.dataset.imikolov.train(WORD_DICT, n), n)
def test_test(self):
n = 5
- self.check_reader(paddle.v2.dataset.imikolov.test(n), n)
+ self.check_reader(paddle.v2.dataset.imikolov.test(WORD_DICT, n), n)
+
+ def test_total(self):
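+ # word indices must be dense: the largest id equals vocab size - 1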
+ _, idx = zip(*WORD_DICT.items())
+ self.assertEqual(sorted(idx)[-1], len(WORD_DICT) - 1)
if __name__ == '__main__':
diff --git a/python/paddle/v2/dataset/wmt14.py b/python/paddle/v2/dataset/wmt14.py
new file mode 100644
index 0000000000000000000000000000000000000000..9904848b5d3ef95dc331fc0ba1a98f29f8b1dfeb
--- /dev/null
+++ b/python/paddle/v2/dataset/wmt14.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+WMT14 English-French translation dataset.
+"""
+import paddle.v2.dataset.common
+import tarfile
+import os.path
+import itertools
+
+__all__ = ['train', 'test', 'build_dict']
+
+URL_DEV_TEST = 'http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz'
+MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5'
+URL_TRAIN = 'http://localhost:8000/train.tgz'
+MD5_TRAIN = '72de99da2830ea5a3a2c4eb36092bbc7'
+
+
+def word_count(f, word_freq=None):
+ add = paddle.v2.dataset.common.dict_add
+ if word_freq is None:
+ word_freq = {}
+
+ for l in f:
+ for w in l.strip().split():
+ add(word_freq, w)
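+ # each line also counts one sentence-start (<s>) and one sentence-end (<e>) marker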
+ add(word_freq, '<s>')
+ add(word_freq, '<e>')
+
+ return word_freq
+
+
+def get_word_idx(word_freq):
+ TYPO_FREQ = 50
+ word_freq = filter(lambda x: x[1] > TYPO_FREQ, word_freq.items())
+ word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
+ words, _ = list(zip(*word_freq_sorted))
+ word_idx = dict(zip(words, xrange(len(words))))
+ word_idx['<unk>'] = len(words)
+ return word_idx
+
+
+def get_word_freq(train, dev):
+ word_freq = word_count(train, word_count(dev))
+ if '<unk>' in word_freq:
+ # remove <unk> for now, since we will set it as last index
+ del word_freq['<unk>']
+ return word_freq
+
+
+def build_dict():
+ base_dir = './wmt14-data'
+ train_en_filename = base_dir + '/train/train.en'
+ train_fr_filename = base_dir + '/train/train.fr'
+ dev_en_filename = base_dir + '/dev/ntst1213.en'
+ dev_fr_filename = base_dir + '/dev/ntst1213.fr'
+
+ if not os.path.exists(train_en_filename) or not os.path.exists(
+ train_fr_filename):
+ with tarfile.open(
+ paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14',
+ MD5_TRAIN)) as tf:
+ tf.extractall(base_dir)
+
+ if not os.path.exists(dev_en_filename) or not os.path.exists(
+ dev_fr_filename):
+ with tarfile.open(
+ paddle.v2.dataset.common.download(URL_DEV_TEST, 'wmt14',
+ MD5_DEV_TEST)) as tf:
+ tf.extractall(base_dir)
+
+ f_en = open(train_en_filename)
+ f_fr = open(train_fr_filename)
+ f_en_dev = open(dev_en_filename)
+ f_fr_dev = open(dev_fr_filename)
+
+ word_freq_en = get_word_freq(f_en, f_en_dev)
+ word_freq_fr = get_word_freq(f_fr, f_fr_dev)
+
+ f_en.close()
+ f_fr.close()
+ f_en_dev.close()
+ f_fr_dev.close()
+
+ return get_word_idx(word_freq_en), get_word_idx(word_freq_fr)
+
+
+def reader_creator(directory, path_en, path_fr, URL, MD5, dict_en, dict_fr):
+ def reader():
+ if not os.path.exists(path_en) or not os.path.exists(path_fr):
+ with tarfile.open(
+ paddle.v2.dataset.common.download(URL, 'wmt14', MD5)) as tf:
+ tf.extractall(directory)
+
+ f_en = open(path_en)
+ f_fr = open(path_fr)
+ UNK_en = dict_en['<unk>']
+ UNK_fr = dict_fr['<unk>']
+
+ for en, fr in itertools.izip(f_en, f_fr):
+ src_ids = [dict_en.get(w, UNK_en) for w in en.strip().split()]
+ tar_ids = [
+ dict_fr.get(w, UNK_fr)
+ for w in ['<s>'] + fr.strip().split() + ['<e>']
+ ]
+
+ # remove sequence whose length > 80 in training mode
+ if len(src_ids) == 0 or len(tar_ids) <= 1 or len(
+ src_ids) > 80 or len(tar_ids) > 80:
+ continue
+
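+ # tar_ids[:-1] is the decoder input (begins with <s>); tar_ids[1:] is the
+ # same sequence shifted one token ahead, i.e. the expected output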
+ yield src_ids, tar_ids[:-1], tar_ids[1:]
+
+ f_en.close()
+ f_fr.close()
+
+ return reader
+
+
+def train(dict_en, dict_fr):
+ directory = './wmt14-data'
+ return reader_creator(directory, directory + '/train/train.en',
+ directory + '/train/train.fr', URL_TRAIN, MD5_TRAIN,
+ dict_en, dict_fr)
+
+
+def test(dict_en, dict_fr):
+ directory = './wmt14-data'
+ return reader_creator(directory, directory + '/dev/ntst1213.en',
+ directory + '/dev/ntst1213.fr', URL_DEV_TEST,
+ MD5_DEV_TEST, dict_en, dict_fr)
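+
+
+# A minimal usage sketch (assuming the URLs above are reachable):
+#
+#   dict_en, dict_fr = build_dict()
+#   for src_ids, trg_in, trg_next in train(dict_en, dict_fr)():
+#       pass  # feed the three id lists into an encoder-decoder model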
diff --git a/python/paddle/v2/inferencer.py b/python/paddle/v2/inference.py
similarity index 98%
rename from python/paddle/v2/inferencer.py
rename to python/paddle/v2/inference.py
index ac03b016c9b8bfbc586072855402ed3a373e9b54..476fd3fa4523a77709f68c73c73e6851e04064aa 100644
--- a/python/paddle/v2/inferencer.py
+++ b/python/paddle/v2/inference.py
@@ -5,7 +5,7 @@ from data_feeder import DataFeeder
import itertools
import numpy
-__all__ = ['Inference', 'infer']
+__all__ = ['infer']
class Inference(object):
diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py
index 010773ddbd96d4226cccc1a63cfc133b78bdcffe..711226d659d49fc2646c34c011c7773ae2517ec9 100644
--- a/python/paddle/v2/layer.py
+++ b/python/paddle/v2/layer.py
@@ -12,58 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
-Before this new package paddle.v2.layer, users would need to use functions
-in paddle.trainer_config_helpers.layers to configure networks.
-
-The Old Way:
-=========
-This old way requires that the creation of a network be defined in a Python
-function, say network_config, and that this Python function being passed to
-paddle.trainer_config_helpers.parse_network_config for the creation of
-protobuf message description of this network.
-
-```python
-def network_config():
- img = paddle.trainer_config_helpers.data_layer(name="pixel", size=784)
- inference = paddle.trainer_config_helpers.fc_layer(
- input=img,
- size=10,
- act=paddle.trainer_config_helpers.SoftmaxActivation())
- cost = paddle.trainer_config_helpers.classification_cost(
- input=inference,
- label=paddle.trainer_config_helpers.data_layer(name="label", size=10))
-
-proto_desc = parse_network_config(network_config)
-```
-
-When parse_network_config executes network_config, those layer definition
-functions like data_layer and fc_layer would change some Python global variables,
-so that after the execution, parse_network_config could collect information from
-these global variables and generates the protobuf message.
-
-
-
-The New Way:
-=========
-In this PR, we define a function in paddle.v2.layer which creates a Python
-class for each layer creation function in paddle.trainer_config_helpers.layers.
-Users can use create a network as follows:
-
-```python
-img = paddle.v2.layer.data(name="pixel", size=784)
-inference = paddle.v2.layer.fc(input=img, size=10, act=paddle.v2.layer.Softmax())
-cost = paddle.v2.layer.classification(
- input=inference,
- label=paddle.v2.layer.data(name="label", size=10))
-
-parameters = paddle.v2.parameters.create(cost)
-```
-
-This new way doesn't require those invocations to layer definition functions
-to be in a Python function but could be anywhere.
-
-Also, the creation of a protobuf message is hidden in the invocation of
-paddle.v2.parameters.create, no longer exposed to users.
+`paddle.v2.layer` is part of the model config package in paddle.v2. In API v2,
+we want to make Paddle a plain Python package. The model config package defines
+how to configure a neural network topology in Paddle Python code.
+
+The primary usage is shown below.
+
+.. code-block:: python
+
+ import paddle.v2 as paddle
+
+ img = paddle.layer.data(name='img', type=paddle.data_type.dense_vector(784))
+ hidden = paddle.layer.fc(input=img, size=200)
+ prediction = paddle.layer.fc(input=hidden, size=10,
+ act=paddle.activation.Softmax())
+
+ # use the prediction instance where needed.
+ parameters = paddle.v2.parameters.create(prediction)
"""
import collections
diff --git a/python/paddle/v2/tests/test_data_feeder.py b/python/paddle/v2/tests/test_data_feeder.py
index ab2bc5df76cd839b5b0184e9559f0c2e03baf38b..71eb3bf31425c22b47accc11c9550042e077ef12 100644
--- a/python/paddle/v2/tests/test_data_feeder.py
+++ b/python/paddle/v2/tests/test_data_feeder.py
@@ -110,14 +110,14 @@ class DataFeederTest(unittest.TestCase):
self.assertAlmostEqual(value.all(), w[i].all())
def test_integer(self):
- dim = 100
+ value_range = 100
batch_size = 32
index = []
for i in xrange(batch_size):
each_sample = []
- each_sample.append(np.random.randint(dim))
+ each_sample.append(np.random.randint(value_range))
index.append(each_sample)
- feeder = DataFeeder([('input', data_type.integer_value(dim))],
+ feeder = DataFeeder([('input', data_type.integer_value(value_range))],
{'input': 0})
arg = feeder(index)
output = arg.getSlotIds(0).copyToNumpyArray()
@@ -125,7 +125,7 @@ class DataFeederTest(unittest.TestCase):
self.assertEqual(output.all(), index.flatten().all())
def test_integer_sequence(self):
- dim = 10000
+ value_range = 10000
batch_size = 32
start = [0]
data = []
@@ -133,11 +133,12 @@ class DataFeederTest(unittest.TestCase):
each_sample = []
each_sample.append(
self.sparse_binary_reader(
- dim, 30, non_empty=True))
+ value_range, 30, non_empty=True))
data.append(each_sample)
start.append(len(each_sample[0]) + start[-1])
- feeder = DataFeeder([('input', data_type.integer_value_sequence(dim))],
- {'input': 0})
+ feeder = DataFeeder(
+ [('input', data_type.integer_value_sequence(value_range))],
+ {'input': 0})
arg = feeder(data)
output_data = arg.getSlotIds(0).copyToNumpyArray()
output_start = arg.getSlotSequenceStartPositions(0).copyToNumpyArray()
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index b4a713f7d53c2c5aef7e356906f88475037fa8d2..8bcdd122b30bde91f652f351dddc27734fdf33cf 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -8,7 +8,7 @@ from . import event as v2_event
from . import optimizer as v2_optimizer
from . import parameters as v2_parameters
-__all__ = ['ITrainer', 'SGD']
+__all__ = ['SGD']
def default_event_handler(event):
@@ -22,26 +22,7 @@ def default_event_handler(event):
pass
-class ITrainer(object):
- """
- The interface of Trainer. The only exposed method is `train`.
- """
-
- def train(self, reader, topology, parameters, event_handler=None):
- """
- train method.
-
- :param reader:
- :param topology:
- :param parameters:
- :param event_handler:
- :return:
- """
-
- raise NotImplementedError()
-
-
-class SGD(ITrainer):
+class SGD(object):
def __init__(self, cost, parameters, update_equation):
"""
Simple SGD Trainer.