diff --git a/.travis.yml b/.travis.yml index ffe3bc193b49eb3b3318cbbc7f1c3d86dc205c14..effcf90769647960d55b971af0939496dc850e7a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -42,7 +42,7 @@ addons: before_install: - | if [ ${JOB} == "BUILD_AND_TEST" ]; then - if ! git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(\.md$)' + if ! git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(\.md$)|(\.rst$)|(\.jpg$)|(\.png$)' then echo "Only markdown docs were updated, stopping build process." exit diff --git a/CMakeLists.txt b/CMakeLists.txt index 090ac9e188422099cc4270b87064b5590e7b620c..af193c27ae7d802a8724fdc1e23b4b5b583e9f7c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -36,6 +36,7 @@ option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF) option(WITH_GLOG "Compile PaddlePaddle use glog, otherwise use a log implement internally" ${LIBGLOG_FOUND}) option(WITH_GFLAGS "Compile PaddlePaddle use gflags, otherwise use a flag implement internally" ${GFLAGS_FOUND}) option(WITH_TIMER "Compile PaddlePaddle use timer" OFF) +option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF) option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND}) option(WITH_DOC "Compile PaddlePaddle with documentation" OFF) option(WITH_SWIG_PY "Compile PaddlePaddle with py PaddlePaddle prediction api" ${SWIG_FOUND}) @@ -115,7 +116,6 @@ else() endif(WITH_AVX) if(WITH_DSO) - set(CUDA_LIBRARIES "") add_definitions(-DPADDLE_USE_DSO) endif(WITH_DSO) @@ -135,6 +135,10 @@ if(NOT WITH_TIMER) add_definitions(-DPADDLE_DISABLE_TIMER) endif(NOT WITH_TIMER) +if(NOT WITH_PROFILER) + add_definitions(-DPADDLE_DISABLE_PROFILER) +endif(NOT WITH_PROFILER) + if(WITH_AVX) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}") diff --git a/demo/image_classification/train.sh b/demo/image_classification/train.sh index ed9b5220fff6a434cd332f0972d39c4149b3ebfe..db0a057bf35b4ad04a08a1e3f1fad3bd6a486350 100755 --- a/demo/image_classification/train.sh +++ b/demo/image_classification/train.sh @@ -24,7 +24,7 @@ paddle train \ --test_all_data_in_one_period=1 \ --use_gpu=1 \ --trainer_count=1 \ ---num_passes=200 \ +--num_passes=300 \ --save_dir=$output \ 2>&1 | tee $log diff --git a/demo/model_zoo/embedding/pre_DictAndModel.sh b/demo/model_zoo/embedding/pre_DictAndModel.sh index 7821850fb25cc5b87aa305c2113efbf50b093ed1..6d647f5dd9368eaf81c19386511c7d231e4799e3 100755 --- a/demo/model_zoo/embedding/pre_DictAndModel.sh +++ b/demo/model_zoo/embedding/pre_DictAndModel.sh @@ -18,7 +18,5 @@ set -x # download the dictionary and pretrained model for file in baidu.dict model_32.emb model_64.emb model_128.emb model_256.emb do - # following is the google drive address - # you can also directly download from https://pan.baidu.com/s/1o8q577s - wget https://www.googledrive.com/host/0B7Q8d52jqeI9ejh6Q1RpMTFQT1k/embedding/$file --no-check-certificate + wget http://paddlepaddle.bj.bcebos.com/model_zoo/embedding/$file done diff --git a/demo/model_zoo/resnet/get_model.sh b/demo/model_zoo/resnet/get_model.sh index 89312d43edf8e4e7d639be73d5b3983ea916b902..133d08fca431540f2ed5cd6e63b51d9ce3a1b344 100755 --- a/demo/model_zoo/resnet/get_model.sh +++ b/demo/model_zoo/resnet/get_model.sh @@ -24,9 +24,7 @@ echo "Downloading ResNet models..." 
for file in resnet_50.tar.gz resnet_101.tar.gz resnet_152.tar.gz mean_meta_224.tar.gz do - # following is the google drive address - # you can also directly download from https://pan.baidu.com/s/1o8q577s - wget https://www.googledrive.com/host/0B7Q8d52jqeI9ejh6Q1RpMTFQT1k/imagenet/$file --no-check-certificate + wget http://paddlepaddle.bj.bcebos.com/model_zoo/imagenet/$file tar -xvf $file rm $file done diff --git a/demo/quick_start/data/README.md b/demo/quick_start/data/README.md new file mode 100644 index 0000000000000000000000000000000000000000..63abcf7ebf31903213e44cf492b93e09f61db14e --- /dev/null +++ b/demo/quick_start/data/README.md @@ -0,0 +1,9 @@ +This dataset consists of electronics product reviews associated with +binary labels (positive/negative) for sentiment classification. + +The preprocessed data can be downloaded by script `get_data.sh`. +The data was derived from reviews_Electronics_5.json.gz at + +http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz + +If you want to process the raw data, you can use the script `proc_from_raw_data/get_data.sh`. diff --git a/demo/quick_start/data/get_data.sh b/demo/quick_start/data/get_data.sh index f355d63225b28ab495b34e72dd3be8d237ae08f4..952de3f3c8f52a7a6f84412f9b38f16ac2503ac2 100755 --- a/demo/quick_start/data/get_data.sh +++ b/demo/quick_start/data/get_data.sh @@ -17,14 +17,11 @@ set -e DIR="$( cd "$(dirname "$0")" ; pwd -P )" cd $DIR -echo "Downloading Amazon Electronics reviews data..." -# http://jmcauley.ucsd.edu/data/amazon/ -wget http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz +# Download the preprocessed data +wget http://paddlepaddle.bj.bcebos.com/demo/quick_start_preprocessed_data/preprocessed_data.tar.gz -echo "Downloading mosesdecoder..." -#https://github.com/moses-smt/mosesdecoder -wget https://github.com/moses-smt/mosesdecoder/archive/master.zip +# Extract package +tar zxvf preprocessed_data.tar.gz -unzip master.zip -rm master.zip -echo "Done." +# Remove compressed package +rm preprocessed_data.tar.gz diff --git a/demo/quick_start/data/pred.list b/demo/quick_start/data/pred.list deleted file mode 100644 index d88b2b63851101a8b40e706b32d8c17b5fabb201..0000000000000000000000000000000000000000 --- a/demo/quick_start/data/pred.list +++ /dev/null @@ -1 +0,0 @@ -./data/pred.txt diff --git a/demo/quick_start/data/pred.txt b/demo/quick_start/data/pred.txt deleted file mode 100644 index 6ed5f738ddaff6645448d5e606dcef1baf01b282..0000000000000000000000000000000000000000 --- a/demo/quick_start/data/pred.txt +++ /dev/null @@ -1,2 +0,0 @@ -the device is cute , but that 's just about all that 's good. the specs are what you 'd expect : it 's a wifi mic , with some noise filter options. the app has the option to upload your baby 's name and photo , which is a cutesy touch. but the app is otherwise unstable and useless unless you upgrade for $ 60 / year.set up involves downloading the app , turning on the mic , switching your phone to the wifi network of the mic , telling the app your wifi settings , switching your wifi back to your home router. the app is then directly connected to your mic.the app is adware ! the main screen says " cry notifications on / off : upgrade to evoz premium and receive a text message of email when your baby is crying " .but the adware points out an important limitation , this monitor is only intended to be used from your home network. if you want to access it remotely , get a webcam. 
this app would make a lot more sense of the premium features were included with the hardware . -don 't be fooled by my one star rating. if there was a zero , i would have selected it. this product was a waste of my money.it has never worked like the company said it supposed to. i only have one device , an iphone 4gs. after charging the the iphone mid way , the i.sound portable power max 16,000 mah is completely drained. the led light no longer lit up. when plugging the isound portable power max into a wall outlet to charge , it would charge for about 20-30 minutes and then all four battery led indicator lit up showing a full charge. i would leave it on to charge for the full 8 hours or more but each time with the same result upon using. don 't buy this thing. put your money to good use elsewhere . diff --git a/demo/quick_start/preprocess.sh b/demo/quick_start/data/proc_from_raw_data/get_data.sh similarity index 65% rename from demo/quick_start/preprocess.sh rename to demo/quick_start/data/proc_from_raw_data/get_data.sh index c9190e2dd2ef754bf3c7287006322b52493dc3a0..cd85e26842dfccea78e4f26bdfee938887021f03 100755 --- a/demo/quick_start/preprocess.sh +++ b/demo/quick_start/data/proc_from_raw_data/get_data.sh @@ -16,10 +16,26 @@ # 1. size of pos : neg = 1:1. # 2. size of testing set = min(25k, len(all_data) * 0.1), others is traning set. # 3. distinct train set and test set. -# 4. build dict set -e +DIR="$( cd "$(dirname "$0")" ; pwd -P )" +cd $DIR + +# Download data +echo "Downloading Amazon Electronics reviews data..." +# http://jmcauley.ucsd.edu/data/amazon/ +wget http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz +echo "Downloading mosesdecoder..." +# https://github.com/moses-smt/mosesdecoder +wget https://github.com/moses-smt/mosesdecoder/archive/master.zip + +unzip master.zip +rm master.zip + +################## +# Preprocess data +echo "Preprocess data..." export LC_ALL=C UNAME_STR=`uname` @@ -29,11 +45,11 @@ else SHUF_PROG='gshuf' fi -mkdir -p data/tmp -python preprocess.py -i data/reviews_Electronics_5.json.gz +mkdir -p tmp +python preprocess.py -i reviews_Electronics_5.json.gz # uniq and shuffle -cd data/tmp -echo 'uniq and shuffle...' +cd tmp +echo 'Uniq and shuffle...' cat pos_*|sort|uniq|${SHUF_PROG}> pos.shuffed cat neg_*|sort|uniq|${SHUF_PROG}> neg.shuffed @@ -53,11 +69,11 @@ cat train.pos train.neg | ${SHUF_PROG} >../train.txt cat test.pos test.neg | ${SHUF_PROG} >../test.txt cd - -echo 'data/train.txt' > data/train.list -echo 'data/test.txt' > data/test.list +echo 'train.txt' > train.list +echo 'test.txt' > test.list # use 30k dict -rm -rf data/tmp -mv data/dict.txt data/dict_all.txt -cat data/dict_all.txt | head -n 30001 > data/dict.txt -echo 'preprocess finished' +rm -rf tmp +mv dict.txt dict_all.txt +cat dict_all.txt | head -n 30001 > dict.txt +echo 'Done.' diff --git a/demo/quick_start/preprocess.py b/demo/quick_start/data/proc_from_raw_data/preprocess.py similarity index 95% rename from demo/quick_start/preprocess.py rename to demo/quick_start/data/proc_from_raw_data/preprocess.py index d87fad632a7429f7d9682badabe4c72ca127354f..56c2c5f16ceb63ff88fa51ed78c2e77ea5b64592 100755 --- a/demo/quick_start/preprocess.py +++ b/demo/quick_start/data/proc_from_raw_data/preprocess.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -1. (remove HTML before or not)tokensizing +1. Tokenize the words and punctuation 2. 
pos sample : rating score 5; neg sample: rating score 1-2. Usage: @@ -76,7 +76,11 @@ def tokenize(sentences): sentences : a list of input sentences. return: a list of processed text. """ - dir = './data/mosesdecoder-master/scripts/tokenizer/tokenizer.perl' + dir = './mosesdecoder-master/scripts/tokenizer/tokenizer.perl' + if not os.path.exists(dir): + sys.exit( + "The ./mosesdecoder-master/scripts/tokenizer/tokenizer.perl does not exist." + ) tokenizer_cmd = [dir, '-l', 'en', '-q', '-'] assert isinstance(sentences, list) text = "\n".join(sentences) @@ -104,7 +108,7 @@ def tokenize_batch(id): num_batch, instance, pre_fix = parse_queue.get() if num_batch == -1: ### parse_queue finished tokenize_queue.put((-1, None, None)) - sys.stderr.write("tokenize theread %s finish\n" % (id)) + sys.stderr.write("Thread %s finished\n" % (id)) break tokenize_instance = tokenize(instance) tokenize_queue.put((num_batch, tokenize_instance, pre_fix)) diff --git a/demo/semantic_role_labeling/data/get_data.sh b/demo/semantic_role_labeling/data/get_data.sh index 55e33f4685627ed483aa6642c518a33558091531..99487e0d9a8c31d884c4a338386ad0ff8e5d9dc7 100644 --- a/demo/semantic_role_labeling/data/get_data.sh +++ b/demo/semantic_role_labeling/data/get_data.sh @@ -14,10 +14,10 @@ # limitations under the License. set -e wget http://www.cs.upc.edu/~srlconll/conll05st-tests.tar.gz -wget https://www.googledrive.com/host/0B7Q8d52jqeI9ejh6Q1RpMTFQT1k/semantic_role_labeling/verbDict.txt --no-check-certificate -wget https://www.googledrive.com/host/0B7Q8d52jqeI9ejh6Q1RpMTFQT1k/semantic_role_labeling/targetDict.txt --no-check-certificate -wget https://www.googledrive.com/host/0B7Q8d52jqeI9ejh6Q1RpMTFQT1k/semantic_role_labeling/wordDict.txt --no-check-certificate -wget https://www.googledrive.com/host/0B7Q8d52jqeI9ejh6Q1RpMTFQT1k/semantic_role_labeling/emb --no-check-certificate +wget http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/verbDict.txt +wget http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/targetDict.txt +wget http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/wordDict.txt +wget http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/emb tar -xzvf conll05st-tests.tar.gz rm conll05st-tests.tar.gz cp ./conll05st-release/test.wsj/words/test.wsj.words.gz .
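For reference, the `tokenize()` helper patched above shells out to the Moses `tokenizer.perl` script over a pipe. The following is a minimal standalone sketch of that pattern, not the repository's exact code: it assumes `mosesdecoder-master` has been unpacked next to the script by `get_data.sh` (the path checked in the patch), and the `moses_tokenize` name and the exact `subprocess` piping are illustrative.

```python
# Minimal sketch (not the repository's exact code) of piping text through
# the Moses tokenizer, mirroring the pattern used by preprocess.py above.
import os
import subprocess
import sys

def moses_tokenize(sentences):
    # Path assumed to be created by get_data.sh, as checked in preprocess.py.
    tokenizer = './mosesdecoder-master/scripts/tokenizer/tokenizer.perl'
    if not os.path.exists(tokenizer):
        sys.exit("%s does not exist." % tokenizer)
    # '-l en' selects English rules, '-q' suppresses warnings, '-' reads stdin,
    # matching the tokenizer_cmd list shown in preprocess.py.
    cmd = [tokenizer, '-l', 'en', '-q', '-']
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = proc.communicate("\n".join(sentences))
    return out.strip().split("\n")

print(moses_tokenize(["It's a wifi mic, with some noise filter options."]))
```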
diff --git a/demo/semantic_role_labeling/dataprovider.py b/demo/semantic_role_labeling/dataprovider.py index d4c137ef42c4e2ec609f3e6f809363e602dfd8dd..2c8e13462730a2e980fa1c3fe342ef0e062ab5d7 100644 --- a/demo/semantic_role_labeling/dataprovider.py +++ b/demo/semantic_role_labeling/dataprovider.py @@ -25,12 +25,13 @@ def hook(settings, word_dict, label_dict, predicate_dict, **kwargs): #all inputs are integral and sequential type settings.slots = [ integer_value_sequence(len(word_dict)), - integer_value_sequence(len(predicate_dict)), integer_value_sequence(len(word_dict)), integer_value_sequence(len(word_dict)), integer_value_sequence(len(word_dict)), integer_value_sequence(len(word_dict)), - integer_value_sequence(len(word_dict)), integer_value_sequence(2), + integer_value_sequence(len(word_dict)), + integer_value_sequence(len(predicate_dict)), + integer_value_sequence(2), integer_value_sequence(len(label_dict)) ] @@ -63,5 +64,5 @@ def process(settings, file_name): label_list = label.split() label_slot = [settings.label_dict.get(w) for w in label_list] - yield word_slot, predicate_slot, ctx_n2_slot, ctx_n1_slot, \ - ctx_0_slot, ctx_p1_slot, ctx_p2_slot, mark_slot, label_slot + yield word_slot, ctx_n2_slot, ctx_n1_slot, \ + ctx_0_slot, ctx_p1_slot, ctx_p2_slot, predicate_slot, mark_slot, label_slot diff --git a/demo/semantic_role_labeling/predict.py b/demo/semantic_role_labeling/predict.py index 2761814e1811e701122e0be4850526c5b290c457..a7f1e8f81f59f6fe95fd29593ef1a826e652e570 100644 --- a/demo/semantic_role_labeling/predict.py +++ b/demo/semantic_role_labeling/predict.py @@ -55,18 +55,14 @@ class Prediction(): slots = [ integer_value_sequence(len_dict), - integer_value_sequence(len_pred), integer_value_sequence(len_dict), integer_value_sequence(len_dict), integer_value_sequence(len_dict), integer_value_sequence(len_dict), integer_value_sequence(len_dict), + integer_value_sequence(len_pred), integer_value_sequence(2) ] - integer_value_sequence(len_dict), integer_value_sequence(len_dict), - integer_value_sequence(len_dict), integer_value_sequence(len_dict), - integer_value_sequence(len_dict), integer_value_sequence(2) - ] self.converter = DataProviderConverter(slots) def load_dict_label(self, dict_file, label_file, predicate_dict_file): @@ -104,8 +100,8 @@ class Prediction(): marks = mark.split() mark_slot = [int(w) for w in marks] - yield word_slot, predicate_slot, ctx_n2_slot, ctx_n1_slot, \ - ctx_0_slot, ctx_p1_slot, ctx_p2_slot, mark_slot + yield word_slot, ctx_n2_slot, ctx_n1_slot, \ + ctx_0_slot, ctx_p1_slot, ctx_p2_slot, predicate_slot, mark_slot def predict(self, data_file, output_file): """ diff --git a/demo/semantic_role_labeling/predict.sh b/demo/semantic_role_labeling/predict.sh index d0acdb0bd093974485475cf796c6d41ac7899135..88ab5898f7d41056f4fe549b3145760783b27bf9 100644 --- a/demo/semantic_role_labeling/predict.sh +++ b/demo/semantic_role_labeling/predict.sh @@ -18,7 +18,7 @@ set -e function get_best_pass() { cat $1 | grep -Pzo 'Test .*\n.*pass-.*' | \ sed -r 'N;s/Test.* cost=([0-9]+\.[0-9]+).*\n.*pass-([0-9]+)/\1 \2/g' | \ - sort | head -n 1 + sort -n | head -n 1 } log=train.log diff --git a/demo/semantic_role_labeling/test.sh b/demo/semantic_role_labeling/test.sh index c4ab44f5ca08aefd18f2851a1410aa08563925a9..f9e1bdcd4c752474329d36c4de3378f7d58e7b4b 100644 --- a/demo/semantic_role_labeling/test.sh +++ b/demo/semantic_role_labeling/test.sh @@ -18,7 +18,7 @@ set -e function get_best_pass() { cat $1 | grep -Pzo 'Test .*\n.*pass-.*' | \ sed -r 'N;s/Test.* 
cost=([0-9]+\.[0-9]+).*\n.*pass-([0-9]+)/\1 \2/g' |\ - sort | head -n 1 + sort -n | head -n 1 } log=train.log diff --git a/demo/sentiment/test.sh b/demo/sentiment/test.sh index 098fbb91389b89c8b69ccf2f5d308e4e715ac950..c8b12a0e89dbddea56b4ee069ebf66f8d8630615 100755 --- a/demo/sentiment/test.sh +++ b/demo/sentiment/test.sh @@ -17,7 +17,7 @@ set -e function get_best_pass() { cat $1 | grep -Pzo 'Test .*\n.*pass-.*' | \ sed -r 'N;s/Test.* classification_error_evaluator=([0-9]+\.[0-9]+).*\n.*pass-([0-9]+)/\1 \2/g' |\ - sort | head -n 1 + sort -n | head -n 1 } log=train.log diff --git a/demo/seqToseq/data/paraphrase_data.sh b/demo/seqToseq/data/paraphrase_data.sh index ea1f8dbcfad35699189f6cd4efc81d97e8c89148..1b3f1d45e11fbd5e600e58f583e503a603e484ff 100755 --- a/demo/seqToseq/data/paraphrase_data.sh +++ b/demo/seqToseq/data/paraphrase_data.sh @@ -16,9 +16,7 @@ set -e set -x # download the in-house paraphrase dataset -# following is the google drive address -# you can also directly download from https://pan.baidu.com/s/1o8q577s -wget https://www.googledrive.com/host/0B7Q8d52jqeI9ejh6Q1RpMTFQT1k/embedding/paraphrase.tar.gz --no-check-certificate +wget http://paddlepaddle.bj.bcebos.com/model_zoo/embedding/paraphrase.tar.gz # untar the dataset tar -zxvf paraphrase.tar.gz diff --git a/demo/seqToseq/data/wmt14_model.sh b/demo/seqToseq/data/wmt14_model.sh index 2cec30688d27a57902cdf64d7be5712d12c69bdd..d6e7a732644dc188a165215ddf3f69e1514425eb 100755 --- a/demo/seqToseq/data/wmt14_model.sh +++ b/demo/seqToseq/data/wmt14_model.sh @@ -16,9 +16,7 @@ set -e set -x # download the pretrained model -# following is the google drive address -# you can also directly download from https://pan.baidu.com/s/1o8q577s -wget https://www.googledrive.com/host/0B7Q8d52jqeI9ejh6Q1RpMTFQT1k/wmt14_model.tar.gz --no-check-certificate +wget http://paddlepaddle.bj.bcebos.com/model_zoo/wmt14_model.tar.gz # untar the model tar -zxvf wmt14_model.tar.gz diff --git a/doc/build/build_from_source.md b/doc/build/build_from_source.md index e44fa0d38e9982e5d0ed159743994ce6acc51246..b932fbc0fa4443d2fd8abfc9d8a78e68c44f667c 100644 --- a/doc/build/build_from_source.md +++ b/doc/build/build_from_source.md @@ -95,7 +95,7 @@ As a simple example, consider the following: ```bash # necessary sudo apt-get update - sudo apt-get install -y g++ make cmake build-essential libatlas-base-dev python python-pip libpython-dev m4 libprotobuf-dev protobuf-compiler python-protobuf python-numpy git + sudo apt-get install -y g++ make cmake swig build-essential libatlas-base-dev python python-pip libpython-dev m4 libprotobuf-dev protobuf-compiler python-protobuf python-numpy git # optional sudo apt-get install libgoogle-glog-dev sudo apt-get install libgflags-dev @@ -149,15 +149,15 @@ If still not found, you can manually set it based on CMake error information fro As a simple example, consider the following: -- **Only CPU** +- **Only CPU with swig** ```bash - cmake .. -DWITH_GPU=OFF + cmake .. -DWITH_GPU=OFF -DWITH_SWIG_PY=ON ``` -- **GPU** +- **GPU with swig** ```bash - cmake .. -DWITH_GPU=ON + cmake .. -DWITH_GPU=ON -DWITH_SWIG_PY=ON ``` - **GPU with doc and swig** @@ -170,15 +170,13 @@ Finally, you can build PaddlePaddle: ```bash # you can add build option here, such as: -cmake .. -DWITH_GPU=ON -DCMAKE_INSTALL_PREFIX= +cmake .. 
-DWITH_GPU=ON -DCMAKE_INSTALL_PREFIX= -DWITH_SWIG_PY=ON # please use sudo make install, if you want to install PaddlePaddle into the system make -j `nproc` && make install # set PaddlePaddle installation path in ~/.bashrc export PATH=/bin:$PATH ``` -**Note:** - If you set `WITH_SWIG_PY=ON`, related python dependencies also need to be installed. Otherwise, PaddlePaddle will automatically install python dependencies at first time when user run paddle commands, such as `paddle version`, `paddle train`. diff --git a/doc/demo/quick_start/index_en.md b/doc/demo/quick_start/index_en.md index 80d816a768a71156ce72cda6ea92b749fbcdbe1f..ec548b5393d7b210d6409328c00917aeb679a451 100644 --- a/doc/demo/quick_start/index_en.md +++ b/doc/demo/quick_start/index_en.md @@ -59,12 +59,11 @@ To build your text classification system, your code will need to perform five st ## Preprocess data into standardized format In this example, you are going to use [Amazon electronic product review dataset](http://jmcauley.ucsd.edu/data/amazon/) to build a bunch of deep neural network models for text classification. Each text in this dataset is a product review. This dataset has two categories: “positive” and “negative”. Positive means the reviewer likes the product, while negative means the reviewer does not like the product. -`demo/quick_start` in the [source code](https://github.com/baidu/Paddle) provides scripts for downloading data and preprocessing data as shown below. The data process takes several minutes (about 3 minutes in our machine). +`demo/quick_start` in the [source code](https://github.com/PaddlePaddle/Paddle) provides a script for downloading the preprocessed data as shown below. (If you want to process the raw data, you can use the script `demo/quick_start/data/proc_from_raw_data/get_data.sh`.) ```bash cd demo/quick_start ./data/get_data.sh -./preprocess.sh ``` ## Transfer Data to Model @@ -477,7 +476,7 @@ The scripts of data downloading, network configurations, and training scrips are Word embedding 15MB 8.484% -trainer_config.bow.py +trainer_config.emb.py diff --git a/doc/index.rst b/doc/index.rst index 668ad75a902bdd14c6198c41380ae93e29cec0d3..76fb7a3ace8057d9cd34e03134c63ef0cd298cae 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -8,3 +8,4 @@ PaddlePaddle Documentation user_guide.rst dev/index.rst algorithm/index.rst + optimization/index.rst diff --git a/doc/optimization/gpu_profiling.rst b/doc/optimization/gpu_profiling.rst new file mode 100644 index 0000000000000000000000000000000000000000..667bf1364e7cd4c9098caba72a127228d78ca38b --- /dev/null +++ b/doc/optimization/gpu_profiling.rst @@ -0,0 +1,237 @@ +Profiling on PaddlePaddle +========================= + +This tutorial will guide you step-by-step through how to conduct profiling and performance tuning using the built-in timer, **nvprof** and **nvvp**. + +- What is profiling? +- Why do we need profiling? +- How to do profiling? +- Profiler tools +- Hands-on approach +- Profiling tips + +What is profiling? +================== +In software engineering, profiling is a form of dynamic program analysis that measures the space (memory) or time +complexity of a program, the usage of particular instructions, or the frequency and duration of function calls. +Most commonly, profiling information serves to aid program optimization. + +Briefly, a profiler is used to measure application performance. Program analysis tools are extremely important for +understanding program behavior. Simple profiling can tell you how long an operation takes; advanced +profiling can explain why an operation takes a long time. + +Why do we need profiling? +========================= +Since training a deep neural network typically takes a very long time, performance has gradually become +the most important concern in the deep learning field. The first step in improving performance is to understand which parts +are slow. There is no point in improving the performance of a region which doesn’t take much time! + + +How to do profiling? +==================== +To achieve maximum performance, there are five steps you can take to reach your goals. + +- Profile the code +- Find the slow parts +- Work out why they’re slow +- Make them fast +- Profile the code again + +Usually, a processor has two key performance limits: floating-point throughput and +memory throughput. A GPU additionally needs enough parallelism to fulfill its potential. +This is why GPUs can be so fast. + +Profiler Tools +============== +For general GPU profiling, a number of tools are provided by both NVIDIA and third parties. + +**nvprof** is NVIDIA's command-line profiler and **nvvp** is NVIDIA's (GUI based) visual profiler. +In this tutorial, we will focus on nvprof and nvvp. + +:code:`test_GpuProfiler` from the :code:`paddle/math/tests` directory will be used to evaluate +the profilers above. + +.. literalinclude:: ../../paddle/math/tests/test_GpuProfiler.cpp + :language: c++ + :lines: 111-124 + :linenos: + +The above code snippet includes two methods; you can use either of them to profile the regions of interest. + +1. :code:`REGISTER_TIMER_INFO` is a built-in timer wrapper which can calculate the time overhead of both CPU functions and CUDA kernels. + +2. :code:`REGISTER_GPU_PROFILER` is a general-purpose wrapper around :code:`cudaProfilerStart` and :code:`cudaProfilerStop` that avoids +program crashes when the CPU version of PaddlePaddle invokes them. + +You can find more details about how to use both of them in the next section. + +Hands-on Approach +================= + +Built-in Timer +-------------- + +To enable the built-in timer in PaddlePaddle, first add :code:`REGISTER_TIMER_INFO` into the regions of your interest. +Then, all timing information can be printed to the console via the :code:`printStatus` or :code:`printAllStatus` function. +As a simple example, consider the following: + +1. Add the :code:`REGISTER_TIMER_INFO` and :code:`printAllStatus` functions (see the emphasized lines). + + .. literalinclude:: ../../paddle/math/tests/test_GpuProfiler.cpp + :language: c++ + :lines: 111-124 + :emphasize-lines: 8-10,13 + :linenos: + +2. Configure cmake with **WITH_TIMER** and recompile PaddlePaddle. + + .. code-block:: bash + + cmake .. -DWITH_TIMER=ON + make + +3. Execute your code and observe the results (see the emphasized lines). + + .. code-block:: bash + :emphasize-lines: 1,12-15 + + > ./paddle/math/tests/test_GpuProfiler + I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/math/tests/test_GpuProfiler + I1117 11:13:42.845065 2522362816 Util.cpp:130] Calling runInitFunctions + I1117 11:13:42.845208 2522362816 Util.cpp:143] Call runInitFunctions done. + [==========] Running 1 test from 1 test case. + [----------] Global test environment set-up.
+ [----------] 1 test from Profiler + [ RUN ] Profiler.BilinearFwdBwd + I1117 11:13:42.845310 2522362816 test_GpuProfiler.cpp:114] Enable GPU Profiler Stat: [testBilinearFwdBwd] "numSamples = 10, channels = 16, imgSizeX = 64, imgSizeY = 64" + I1117 11:13:42.850154 2522362816 ThreadLocal.cpp:37] thread use undeterministic rand seed:20659751 + I1117 11:13:42.981501 2522362816 Stat.cpp:130] ======= StatSet: [GlobalStatInfo] status ====== + I1117 11:13:42.981539 2522362816 Stat.cpp:133] Stat=testBilinearFwdBwd total=136.141 avg=136.141 max=136.141 min=136.141 count=1 + I1117 11:13:42.981572 2522362816 Stat.cpp:141] ======= BarrierStatSet status ====== + I1117 11:13:42.981575 2522362816 Stat.cpp:154] -------------------------------------------------- + [ OK ] Profiler.BilinearFwdBwd (136 ms) + [----------] 1 test from Profiler (136 ms total) + + [----------] Global test environment tear-down + [==========] 1 test from 1 test case ran. (136 ms total) + [ PASSED ] 1 test. + +nvprof profiler +--------------- + +To use the command-line profiler **nvprof**, take the following steps: + +1. Add the :code:`REGISTER_GPU_PROFILER` function (see the emphasized lines). + + .. literalinclude:: ../../paddle/math/tests/test_GpuProfiler.cpp + :language: c++ + :lines: 111-124 + :emphasize-lines: 6-7 + :linenos: + +2. Configure cmake with **WITH_PROFILER** and recompile PaddlePaddle. + + .. code-block:: bash + + cmake .. -DWITH_PROFILER=ON + make + +3. Use the NVIDIA profiler **nvprof** to profile the binary. + + .. code-block:: bash + + nvprof ./paddle/math/tests/test_GpuProfiler + +Then, you can get the following profiling result: + +.. code-block:: bash + + ==78544== Profiling application: ./paddle/math/tests/test_GpuProfiler + ==78544== Profiling result: + Time(%) Time Calls Avg Min Max Name + 27.60% 9.6305ms 5 1.9261ms 3.4560us 6.4035ms [CUDA memcpy HtoD] + 26.07% 9.0957ms 1 9.0957ms 9.0957ms 9.0957ms KeBilinearInterpBw + 23.78% 8.2977ms 1 8.2977ms 8.2977ms 8.2977ms KeBilinearInterpFw + 22.55% 7.8661ms 2 3.9330ms 1.5798ms 6.2863ms [CUDA memcpy DtoH] + + ==78544== API calls: + Time(%) Time Calls Avg Min Max Name + 46.85% 682.28ms 8 85.285ms 12.639us 682.03ms cudaStreamCreateWithFlags + 39.83% 580.00ms 4 145.00ms 302ns 550.27ms cudaFree + 9.82% 143.03ms 9 15.892ms 8.7090us 142.78ms cudaStreamCreate + 1.23% 17.983ms 7 2.5690ms 23.210us 6.4563ms cudaMemcpy + 1.23% 17.849ms 2 8.9247ms 8.4726ms 9.3768ms cudaStreamSynchronize + 0.66% 9.5969ms 7 1.3710ms 288.43us 2.4279ms cudaHostAlloc + 0.13% 1.9530ms 11 177.54us 7.6810us 591.06us cudaMalloc + 0.07% 1.0424ms 8 130.30us 1.6970us 453.72us cudaGetDevice + 0.04% 527.90us 40 13.197us 525ns 253.99us cudaEventCreateWithFlags + 0.03% 435.73us 348 1.2520us 124ns 42.704us cuDeviceGetAttribute + 0.03% 419.36us 1 419.36us 419.36us 419.36us cudaGetDeviceCount + 0.02% 260.75us 2 130.38us 129.32us 131.43us cudaGetDeviceProperties + 0.02% 222.32us 2 111.16us 106.94us 115.39us cudaLaunch + 0.01% 214.06us 4 53.514us 28.586us 77.655us cuDeviceGetName + 0.01% 115.45us 4 28.861us 9.8250us 44.526us cuDeviceTotalMem + 0.01% 83.988us 4 20.997us 578ns 77.760us cudaSetDevice + 0.00% 38.918us 1 38.918us 38.918us 38.918us cudaEventCreate + 0.00% 34.573us 31 1.1150us 279ns 12.784us cudaDeviceGetAttribute + 0.00% 17.767us 1 17.767us 17.767us 17.767us cudaProfilerStart + 0.00% 15.228us 2 7.6140us 3.5460us 11.682us cudaConfigureCall + 0.00% 14.536us 2 7.2680us 1.1490us 13.387us cudaGetLastError + 0.00% 8.6080us 26 331ns 173ns 783ns cudaSetupArgument + 0.00% 5.5470us 6 924ns 215ns 2.6780us cuDeviceGet + 0.00% 5.4090us 6 901ns 328ns 3.3320us cuDeviceGetCount + 0.00% 4.1770us 3 1.3920us 1.0630us 1.8300us cuDriverGetVersion + 0.00% 3.4650us 3 1.1550us 1.0810us 1.2680us cuInit + 0.00% 830ns 1 830ns 830ns 830ns cudaRuntimeGetVersion + + +nvvp profiler +------------- + +For the visual profiler **nvvp**, you can either import the output of :code:`nvprof -o ...` or +run the application through the GUI. + +**Note: nvvp also supports CPU profiling** (click the box in nvvp to enable profile execution on CPU). + +.. image:: nvvp1.png + :align: center + :scale: 33% + +From the perspective of kernel functions, **nvvp** can even illustrate why an operation takes a long time. +As shown in the following figure, the kernel's block usage, register usage, and shared memory usage reported by :code:`nvvp` +allow us to see whether all warps on the GPU are fully utilized. + +.. image:: nvvp2.png + :align: center + :scale: 33% + +From the perspective of the application, **nvvp** can give you suggestions for addressing performance bottlenecks. +For instance, the advice on data movement and compute utilization in the figures below can guide you in tuning performance. + +.. image:: nvvp3.png + :align: center + :scale: 33% + +.. image:: nvvp4.png + :align: center + :scale: 33% + +Profiling tips +============== + +- The **nvprof** and **nvvp** output is a very good place to start. +- The timeline is a good place to go next. +- Only dig deep into a kernel if it’s taking a significant amount of your time. +- Where possible, try to match profiler output with theory. + 1) For example, if I know I’m moving 1GB, and my kernel takes 10ms, I expect the profiler to report 100GB/s. + 2) Discrepancies are likely to mean your application isn’t doing what you thought it was. +- Know your hardware: If your GPU can do 6 TFLOPs, and you’re already doing 5.5 TFLOPs, you won’t go much faster! + + +Profiling is a key step in optimization. Sometimes quite simple changes can lead to big improvements in performance. +Your mileage may vary! + +Reference +========= +Jeremy Appleyard, `GPU Profiling for Deep Learning `_, 2015 diff --git a/doc/optimization/index.rst b/doc/optimization/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..c9e87e0778dfe44fa3d1bb84d0ad340aa6f25d08 --- /dev/null +++ b/doc/optimization/index.rst @@ -0,0 +1,7 @@ +Performance Tuning +================== + +.. 
toctree:: + :maxdepth: 3 + + gpu_profiling.rst diff --git a/doc/optimization/nvvp1.png b/doc/optimization/nvvp1.png new file mode 100644 index 0000000000000000000000000000000000000000..1af23ac3c52929b2b0645d2f9fa4d4c6db1f6e77 Binary files /dev/null and b/doc/optimization/nvvp1.png differ diff --git a/doc/optimization/nvvp2.png b/doc/optimization/nvvp2.png new file mode 100644 index 0000000000000000000000000000000000000000..177c9db708da6863d1075f3e615f5962dbe18b29 Binary files /dev/null and b/doc/optimization/nvvp2.png differ diff --git a/doc/optimization/nvvp3.png b/doc/optimization/nvvp3.png new file mode 100644 index 0000000000000000000000000000000000000000..d8f393667d6569b6f1e61ffccac43fae5888b6db Binary files /dev/null and b/doc/optimization/nvvp3.png differ diff --git a/doc/optimization/nvvp4.png b/doc/optimization/nvvp4.png new file mode 100644 index 0000000000000000000000000000000000000000..51f2f3e183295de6cf8ddaf2b3b8a0862aa35f01 Binary files /dev/null and b/doc/optimization/nvvp4.png differ diff --git a/doc_cn/algorithm/rnn/hierarchical-layer.md b/doc_cn/algorithm/rnn/hierarchical-layer.rst similarity index 50% rename from doc_cn/algorithm/rnn/hierarchical-layer.md rename to doc_cn/algorithm/rnn/hierarchical-layer.rst index 519653df081d6e7919ada3cbff6aaf4d2a2f6115..a9906b8b9c2036ae349f30d7edee770884f73f99 100644 --- a/doc_cn/algorithm/rnn/hierarchical-layer.md +++ b/doc_cn/algorithm/rnn/hierarchical-layer.rst @@ -1,6 +1,11 @@ -# 支持双层序列作为输入的Layer +########################### +支持双层序列作为输入的Layer +########################### -## 概述 +.. contents:: + +概述 +==== 在自然语言处理任务中,序列是一种常见的数据类型。一个独立的词语,可以看作是一个非序列输入,或者,我们称之为一个0层的序列;由词语构成的句子,是一个单层序列;若干个句子构成一个段落,是一个双层的序列。 @@ -12,55 +17,79 @@ + 单层序列:排成一列的多个元素,每个元素是一个0层序列,元素之间的顺序是重要的输入信息 + 双层序列:排成一列的多个元素,每个元素是一个单层序列,称之为双层序列的一个子序列(subseq),subseq的每个元素是一个0层序列 - 在 PaddlePaddle中,下面这些Layer能够接受双层序列作为输入,完成相应的计算。 -## pooling_layer - -pooling_layer的使用示例如下,详细见配置API。 -```python -seq_pool = pooling_layer(input=layer, - pooling_type=AvgPooling(), - agg_level=AggregateLevel.EACH_SEQUENCE) -``` + +pooling_layer +============== + +pooling_layer 的使用示例如下,详细见 `pooling_layer`_ 配置API。 + +.. code-block:: bash + + seq_pool = pooling_layer(input=layer, + pooling_type=AvgPooling(), + agg_level=AggregateLevel.EACH_SEQUENCE) + - `pooling_type` 目前支持两种,分别是:MaxPooling()和AvgPooling()。 -- `agg_level=AggregateLevel.TIMESTEP`时(默认值): + +- `agg_level=AggregateLevel.TIMESTEP` 时(默认值): + - 作用:双层序列经过运算变成一个0层序列,或单层序列经过运算变成一个0层序列 - 输入:一个双层序列,或一个单层序列 - 输出:一个0层序列,即整个输入序列(单层或双层)的平均值(或最大值) -- `agg_level=AggregateLevel.EACH_SEQUENCE`时: + +- `agg_level=AggregateLevel.EACH_SEQUENCE` 时: + - 作用:一个双层序列经过运算变成一个单层序列 - 输入:必须是一个双层序列 - 输出:一个单层序列,序列的每个元素是原来双层序列每个subseq元素的平均值(或最大值) -## last_seq 和 first_seq +last_seq 和 first_seq +===================== + +last_seq 的使用示例如下( `first_seq`_ 类似),详细见 `last_seq`_ 配置API。 + +.. code-block:: bash + + last = last_seq(input=layer, + agg_level=AggregateLevel.EACH_SEQUENCE) + +- `agg_level=AggregateLevel.TIMESTEP` 时(默认值): -last_seq的使用示例如下(first_seq类似),详细见配置API。 -```python -last = last_seq(input=layer, - agg_level=AggregateLevel.EACH_SEQUENCE) -``` -- `agg_level=AggregateLevel.TIMESTEP`时(默认值): - 作用:一个双层序列经过运算变成一个0层序列,或一个单层序列经过运算变成一个0层序列 - 输入:一个双层序列或一个单层序列 - 输出:一个0层序列,即整个输入序列(双层或者单层)最后一个,或第一个元素。 -- `agg_level=AggregateLevel.EACH_SEQUENCE`时: + +- `agg_level=AggregateLevel.EACH_SEQUENCE` 时: - 作用:一个双层序列经过运算变成一个单层序列 - 输入:必须是一个双层序列 - 输出:一个单层序列,其中每个元素是双层序列中每个subseq最后一个(或第一个)元素。 -## expand_layer +expand_layer +============ + +expand_layer 的使用示例如下,详细见 `expand_layer`_ 配置API。 + +.. 
code-block:: bash + + expand = expand_layer(input=layer1, + expand_as=layer2, + expand_level=ExpandLevel.FROM_TIMESTEP) + +- `expand_level=ExpandLevel.FROM_TIMESTEP` 时(默认值): -expand_layer的使用示例如下,详细见配置API。 -```python -expand = expand_layer(input=layer1, - expand_as=layer2, - expand_level=ExpandLevel.FROM_TIMESTEP) -``` -- `expand_level=ExpandLevel.FROM_TIMESTEP`时(默认值): - 作用:一个0层序列经过运算扩展成一个单层序列,或者一个双层序列 - - 输入:layer1必须是一个0层序列,是待扩展的数据;layer2可以是一个单层序列,或者是一个双层序列,提供扩展的长度信息 - - 输出:一个单层序列,或一个双层序列,输出序列的类型(双层序列,或单层序列)和序列中含有元素的数目同 layer2一致。若输出是单层序列,单层序列的每个元素(0层序列),都是对layer1元素的拷贝;若输出是双层序列,双层序列每个subseq中每个元素(0层序列),都是对layer1元素的拷贝 -- `expand_level=ExpandLevel.FROM_SEQUENCE`时: + - 输入:layer1必须是一个0层序列,是待扩展的数据;layer2 可以是一个单层序列,或者是一个双层序列,提供扩展的长度信息 + - 输出:一个单层序列或一个双层序列,输出序列的类型(双层序列或单层序列)和序列中含有元素的数目同 layer2 一致。若输出是单层序列,单层序列的每个元素(0层序列),都是对layer1元素的拷贝;若输出是双层序列,双层序列每个subseq中每个元素(0层序列),都是对layer1元素的拷贝 + +- `expand_level=ExpandLevel.FROM_SEQUENCE` 时: + - 作用:一个单层序列经过运算扩展成一个双层序列 - - 输入:layer1必须是一个单层序列,是待扩展的数据;layer2必须是一个双层序列,提供扩展的长度信息 - - 输出:一个双层序列,序列中含有元素的数目同layer2一致。要求单层序列含有元素的数目(0层序列),和双层序列含有subseq 的数目一致。单层序列第i个元素(0层序列),被扩展为一个单层序列,构成了输出双层序列的第i个subseq。 + - 输入:layer1必须是一个单层序列,是待扩展的数据;layer2 必须是一个双层序列,提供扩展的长度信息 + - 输出:一个双层序列,序列中含有元素的数目同 layer2 一致。要求单层序列含有元素的数目(0层序列)和双层序列含有subseq 的数目一致。单层序列第i个元素(0层序列),被扩展为一个单层序列,构成了输出双层序列的第i个 subseq 。 + + +.. _pooling_layer: ../../../doc/ui/api/trainer_config_helpers/layers.html#pooling-layer +.. _last_seq: ../../../doc/ui/api/trainer_config_helpers/layers.html#last-seq +.. _first_seq: ../../../doc/ui/api/trainer_config_helpers/layers.html#first-seq +.. _expand_layer: ../../../doc/ui/api/trainer_config_helpers/layers.html#expand-layer diff --git a/doc_cn/build_and_install/install/ubuntu_install.rst b/doc_cn/build_and_install/install/ubuntu_install.rst index 70ac5225bd82e40838875b49f67e70ff08eff853..4500d6e0b03be9280e3e6c25cddbf7fb389671b8 100644 --- a/doc_cn/build_and_install/install/ubuntu_install.rst +++ b/doc_cn/build_and_install/install/ubuntu_install.rst @@ -1,35 +1,42 @@ -使用deb包在Ubuntu上安装PaddlePaddle +Ubuntu部署PaddlePaddle =================================== -PaddlePaddle目前支持使用deb包安装。Paddle的 :code:`deb` 安装包在ubuntu 14.04中正确,但理论上支持其他的 debian 发行版。 +PaddlePaddle提供了ubuntu 14.04 deb安装包。 +安装 +------ -PaddlePaddle的ubuntu安装包分为四个版本,他们是 cpu、gpu、cpu-noavx、gpu-noavx 四个版本。其中 noavx 用于不支持AVX指令集的cpu。安装包的下载地址是\: https://github.com/baidu/Paddle/releases/ +安装包的下载地址是\: https://github.com/PaddlePaddle/Paddle/releases +它包含四个版本\: -用户需要先将PaddlePaddle安装包下载到本地,然后执行如下 :code:`gdebi` 命令即可完成安装。 +* cpu版本: 支持主流x86处理器平台, 使用了avx指令集。 -.. code-block:: shell +* cpu-noavx版本:支持主流x86处理器平台,没有使用avx指令集。 - gdebi paddle-*-cpu.deb +* gpu版本:支持主流x86处理器平台,支持nvidia cuda平台,使用了avx指令集。 + +* gpu-noavx版本:支持主流x86处理器平台,支持nvidia cuda平台,没有使用avx指令集。 -如果 :code:`gdebi` 没有安装,则需要使用 :code:`sudo apt-get install gdebi`, 来安装 :code:`gdebi` 。 +下载完相关安装包后,执行: +.. code-block:: shell + + sudo apt-get install gdebi + gdebi paddle-*-cpu.deb -或者使用下面一条命令安装. +或者: .. code-block:: shell dpkg -i paddle-*-cpu.deb apt-get install -f + 在 :code:`dpkg -i` 的时候如果报一些依赖未找到的错误是正常的, 在 :code:`apt-get install -f` 里会继续安装 PaddlePaddle。 -需要注意的是,如果使用GPU版本的PaddlePaddle,请安装CUDA 7.5 和CUDNN 5到本地环境中, -并设置好对应的环境变量(LD_LIBRARY_PATH等等)。 - -安装完成后,可以使用命令 :code:`paddle version` 查看安装后的paddle 版本。可能的输出为 +安装完成后,可以使用命令 :code:`paddle version` 查看安装后的paddle 版本: .. literalinclude:: paddle_version.txt @@ -39,45 +46,16 @@ PaddlePaddle的ubuntu安装包分为四个版本,他们是 cpu、gpu、cpu-noa libcudart.so/libcudnn.so找不到 ++++++++++++++++++++++++++++++ -安装完成PaddlePaddle后,运行 :code:`paddle train` 报错\: - -.. 
code-block:: shell - - 0831 12:36:04.151525 1085 hl_dso_loader.cc:70] Check failed: nullptr != *dso_handle For Gpu version of PaddlePaddle, it couldn't find CUDA library: libcudart.so Please make sure you already specify its path.Note: for training data on Cpu using Gpu version of PaddlePaddle,you must specify libcudart.so via LD_LIBRARY_PATH. - -PaddlePaddle使用运行时动态连接CUDA的so,如果在 LD_LIBRARY_PATH里面找不到这些动态 -库的话,会报寻找不到这些动态库。 +安装完成后,运行 :code:`paddle train` 报错\: -解决方法很简单,就是将这些动态库加到环境变量里面。比较可能的命令如下。 +.. code-block:: shell -.. code-block:: text + 0831 12:36:04.151525 1085 hl_dso_loader.cc:70] Check failed: nullptr != *dso_handle For Gpu version of PaddlePaddle, it couldn't find CUDA library: libcudart.so Please make sure you already specify its path.Note: for training data on Cpu using Gpu version of PaddlePaddle,you must specify libcudart.so via LD_LIBRARY_PATH. - export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH +原因是未设置cuda运行时环境变量。 如果使用GPU版本的PaddlePaddle,请安装CUDA 7.5 和CUDNN 5到本地环境中,并设置: -CUDA Driver找不到 -+++++++++++++++++ - -运行 :code:`paddle train` 报错\: - -.. code-block:: text - - F0831 12:39:16.699000 1090 hl_cuda_device.cc:530] Check failed: cudaSuccess == cudaStat (0 vs. 35) Cuda Error: CUDA driver version is insufficient for CUDA runtime version - -PaddlePaddle运行时如果没有寻找到cuda的driver,变会报这个错误。解决办法是将cuda -driver添加到LD_LIBRARY_PATH中。比较可能的命令如下。 - -.. code-block:: text - - export LD_LIBRARY_PATH=/usr/lib64:$LD_LIBRARY_PATH - -config文件找不到 -++++++++++++++++ - -运行 :code:`paddle train` 得到结果\: - -.. code-block:: text +.. code-block:: shell - F0831 20:53:07.525789 1302 TrainerMain.cpp:94] Check failed: config != nullptr no valid config + export LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/lib:$LD_LIBRARY_PATH + export PATH=/usr/local/cuda/bin:$PATH -PaddlePaddle在运行时找不到对应的config文件,说明命令行参数 :code:`config` 没有设置。 -而这个一般说明PaddlePaddle已经安装完毕了。 \ No newline at end of file diff --git a/doc_cn/build_and_install/paddle_on_kubernetes.md b/doc_cn/build_and_install/paddle_on_kubernetes.md new file mode 100644 index 0000000000000000000000000000000000000000..f8c9f19a9fef50c03f6ffee639a580adbf29844a --- /dev/null +++ b/doc_cn/build_and_install/paddle_on_kubernetes.md @@ -0,0 +1,205 @@ +# Paddle On Kubernetes:单机训练 + +在这篇文档里,我们介绍如何在 Kubernetes 集群上启动一个单机使用CPU的Paddle训练作业。在下一篇中,我们将介绍如何启动分布式训练作业。 + +## 制作Docker镜像 + +在一个功能齐全的Kubernetes机群里,通常我们会安装Ceph等分布式文件系统来存储训练数据。这样的话,一个分布式Paddle训练任务中的每个进程都可以从Ceph读取数据。在这个例子里,我们只演示一个单机作业,所以可以简化对环境的要求,把训练数据直接放在 +Paddle的Docker image里。为此,我们需要制作一个包含训练数据的Paddle镜像。 + +Paddle 的 [Quick Start Tutorial](http://www.paddlepaddle.org/doc/demo/quick_start/index_en.html) +里介绍了用Paddle源码中的脚本下载训练数据的过程。 +而 `paddledev/paddle:cpu-demo-latest` 镜像里有 Paddle 源码与demo,( 请注意,默认的 +Paddle镜像 `paddledev/paddle:cpu-latest` 是不包括源码的, Paddle的各版本镜像可以参考 [Docker installation guide](http://www.paddlepaddle.org/doc/build/docker_install.html) ),所以我们使用这个镜像来下载训练数据到Docker container中,然后把这个包含了训练数据的container保存为一个新的镜像。 + +### 运行容器 + +``` +$ docker run --name quick_start_data -it paddledev/paddle:cpu-demo-latest +``` + +### 下载数据 + +进入容器`/root/paddle/demo/quick_start/data`目录,使用`get_data.sh`下载数据 + +``` +$ root@fbd1f2bb71f4:~/paddle/demo/quick_start/data# ./get_data.sh + +Downloading Amazon Electronics reviews data... +--2016-10-31 01:33:43-- http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz +Resolving snap.stanford.edu (snap.stanford.edu)... 171.64.75.80 +Connecting to snap.stanford.edu (snap.stanford.edu)|171.64.75.80|:80... connected. +HTTP request sent, awaiting response... 
200 OK +Length: 495854086 (473M) [application/x-gzip] +Saving to: 'reviews_Electronics_5.json.gz' + + 10% [=======> ] 874,279 64.7KB/s eta 2h 13m + +``` + +### 修改启动脚本 + +下载完数据后,修改`/root/paddle/demo/quick_start/train.sh`文件,内容如下(增加了一条cd命令) +``` +set -e +cd /root/paddle/demo/quick_start +cfg=trainer_config.lr.py +#cfg=trainer_config.emb.py +#cfg=trainer_config.cnn.py +#cfg=trainer_config.lstm.py +#cfg=trainer_config.bidi-lstm.py +#cfg=trainer_config.db-lstm.py +paddle train \ + --config=$cfg \ + --save_dir=./output \ + --trainer_count=4 \ + --log_period=20 \ + --num_passes=15 \ + --use_gpu=false \ + --show_parameter_stats_period=100 \ + --test_all_data_in_one_period=1 \ + 2>&1 | tee 'train.log' +``` + +### 提交镜像 + +修改启动脚本后,退出容器,使用`docker commit`命令创建新镜像。 + +``` +$ docker commit quick_start_data mypaddle/paddle:quickstart +``` + +## 使用 Kubernetes 进行训练 + +>针对任务运行完成后容器自动退出的场景,Kubernetes有Job类型的资源来支持。下文就是用Job类型的资源来进行训练。 + +### 编写yaml文件 + +在训练时,输出结果可能会随着容器的消耗而被删除,需要在创建容器前挂载卷以便我们保存训练结果。使用我们之前构造的镜像,可以创建一个 [Kubernetes Job](http://kubernetes.io/docs/user-guide/jobs/#what-is-a-job),简单的yaml文件如下: + +``` +apiVersion: batch/v1 +kind: Job +metadata: + name: quickstart +spec: + parallelism: 1 + completions: 1 + template: + metadata: + name: quickstart + spec: + volumes: + - name: output + hostPath: + path: /home/work/paddle_output + containers: + - name: pi + image: mypaddle/paddle:quickstart + command: ["bin/bash", "-c", "/root/paddle/demo/quick_start/train.sh"] + volumeMounts: + - name: output + mountPath: /root/paddle/demo/quick_start/output + restartPolicy: Never +``` + +### 创建Paddle Job + +使用上文创建的yaml文件创建Kubernetes Job,命令为: + +``` +$ kubectl create -f paddle.yaml +``` + +查看job的详细情况: + +``` +$ kubectl get job +NAME DESIRED SUCCESSFUL AGE +quickstart 1 0 58s + +$ kubectl describe job quickstart +Name: quickstart +Namespace: default +Image(s): registry.baidu.com/public/paddle:cpu-demo-latest +Selector: controller-uid=f120da72-9f18-11e6-b363-448a5b355b84 +Parallelism: 1 +Completions: 1 +Start Time: Mon, 31 Oct 2016 11:20:16 +0800 +Labels: controller-uid=f120da72-9f18-11e6-b363-448a5b355b84,job-name=quickstart +Pods Statuses: 0 Running / 1 Succeeded / 0 Failed +Volumes: + output: + Type: HostPath (bare host directory volume) + Path: /home/work/paddle_output +Events: + FirstSeen LastSeen Count From SubobjectPath Type Reason Message + --------- -------- ----- ---- ------------- -------- ------ ------- + 1m 1m 1 {job-controller } Normal SuccessfulCreate Created pod: quickstart-fa0wx +``` + +### 查看训练结果 + +根据Job对应的Pod信息,可以查看此Pod运行的宿主机。 + +``` +kubectl describe pod quickstart-fa0wx +Name: quickstart-fa0wx +Namespace: default +Node: paddle-demo-let02/10.206.202.44 +Start Time: Mon, 31 Oct 2016 11:20:17 +0800 +Labels: controller-uid=f120da72-9f18-11e6-b363-448a5b355b84,job-name=quickstart +Status: Succeeded +IP: 10.0.0.9 +Controllers: Job/quickstart +Containers: + quickstart: + Container ID: docker://b8561f5c79193550d64fa47418a9e67ebdd71546186e840f88de5026b8097465 + Image: registry.baidu.com/public/paddle:cpu-demo-latest + Image ID: docker://18e457ce3d362ff5f3febf8e7f85ffec852f70f3b629add10aed84f930a68750 + Port: + Command: + bin/bash + -c + /root/paddle/demo/quick_start/train.sh + QoS Tier: + cpu: BestEffort + memory: BestEffort + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Mon, 31 Oct 2016 11:20:20 +0800 + Finished: Mon, 31 Oct 2016 11:21:46 +0800 + Ready: False + Restart Count: 0 + Environment Variables: +Conditions: + Type Status + Ready False +Volumes: + output: + Type: HostPath (bare host 
directory volume) + Path: /home/work/paddle_output +``` + +我们还可以登录到宿主机上查看训练结果。 + +``` +[root@paddle-demo-let02 paddle_output]# ll +total 60 +drwxr-xr-x 2 root root 4096 Oct 31 11:20 pass-00000 +drwxr-xr-x 2 root root 4096 Oct 31 11:20 pass-00001 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00002 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00003 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00004 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00005 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00006 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00007 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00008 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00009 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00010 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00011 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00012 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00013 +drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00014 +``` diff --git a/doc_cn/cluster/k8s/Dockerfile b/doc_cn/cluster/k8s/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..3a73606c61432329b4cc2d2f8daadc5af8735c96 --- /dev/null +++ b/doc_cn/cluster/k8s/Dockerfile @@ -0,0 +1,7 @@ +FROM paddledev/paddle:cpu-latest + +MAINTAINER zjsxzong89@gmail.com + +COPY start.sh /root/ +COPY start_paddle.py /root/ +CMD ["bash"," -c","/root/start.sh"] \ No newline at end of file diff --git a/doc_cn/cluster/k8s/distributed_training_on_kubernetes.md b/doc_cn/cluster/k8s/distributed_training_on_kubernetes.md new file mode 100644 index 0000000000000000000000000000000000000000..d9ed431ec0566cf90f11ebaeec56560ff69e71fe --- /dev/null +++ b/doc_cn/cluster/k8s/distributed_training_on_kubernetes.md @@ -0,0 +1,309 @@ + +# PaddlePaddle on Kubernetes:分布式训练 + +前一篇文章介绍了如何在Kubernetes集群上启动一个单机PaddlePaddle训练作业 (Job)。在这篇文章里,我们介绍如何在Kubernetes集群上进行分布式PaddlePaddle训练作业。关于PaddlePaddle的分布式训练,文章 [Cluster Training](https://github.com/baidu/Paddle/blob/develop/doc/cluster/opensource/cluster_train.md)介绍了一种通过SSH远程分发任务,进行分布式训练的方法,与此不同的是,本文将介绍在Kubernetes容器管理平台上快速构建PaddlePaddle容器集群,进行分布式训练的方案。 + +## Kubernetes 基本概念 + +[*Kubernetes*](http://kubernetes.io/)是Google开源的容器集群管理系统,其提供应用部署、维护、 扩展机制等功能,利用Kubernetes能方便地管理跨机器运行容器化的应用。Kubernetes可以在物理机或虚拟机上运行,且支持部署到[AWS](http://kubernetes.io/docs/getting-started-guides/aws),[Azure](http://kubernetes.io/docs/getting-started-guides/azure/),[GCE](http://kubernetes.io/docs/getting-started-guides/gce)等多种公有云环境。介绍分布式训练之前,需要对[Kubernetes](http://kubernetes.io/)有一个基本的认识,下面先简要介绍一下本文用到的几个Kubernetes概念。 + +- [*Node*](http://kubernetes.io/docs/admin/node/) 表示一个Kubernetes集群中的一个工作节点,这个节点可以是物理机或者虚拟机,Kubernetes集群就是由node节点与master节点组成的。 + +- [*Pod*](http://kubernetes.io/docs/user-guide/pods/) 是一组(一个或多个)容器,pod是Kubernetes的最小调度单元,一个pod中的所有容器会被调度到同一个node上。Pod中的容器共享NET,PID,IPC,UTS等Linux namespace。由于容器之间共享NET namespace,所以它们使用同一个IP地址,可以通过*localhost*互相通信。不同pod之间可以通过IP地址访问。 + +- [*Job*](http://kubernetes.io/docs/user-guide/jobs/) 是Kubernetes上运行的作业,一次作业称为一个job,通常每个job包括一个或者多个pods。 + +- [*Volume*](http://kubernetes.io/docs/user-guide/volumes/) 存储卷,是pod内的容器都可以访问的共享目录,也是容器与node之间共享文件的方式,因为容器内的文件都是暂时存在的,当容器因为各种原因被销毁时,其内部的文件也会随之消失。通过volume,就可以将这些文件持久化存储。Kubernetes支持多种volume,例如hostPath(宿主机目录),gcePersistentDisk,awsElasticBlockStore等。 + +- [*Namespaces*](http://kubernetes.io/docs/user-guide/volumes/) 命名空间,在kubernetes中创建的所有资源对象(例如上文的pod,job)等都属于一个命名空间,在同一个命名空间中,资源对象的名字是唯一的,不同空间的资源名可以重复,命名空间主要为了对象进行逻辑上的分组便于管理。本文只使用了默认命名空间。 + +## 整体方案 + +### 部署Kubernetes集群 + 
+首先,我们需要拥有一个Kubernetes集群,在这个集群中所有node与pod都可以互相通信。关于Kubernetes集群搭建,可以参考[官方文档](http://kubernetes.io/docs/getting-started-guides/kubeadm/),在以后的文章中我们也会介绍AWS上搭建的方案。本文假设大家能找到几台物理机,并且可以按照官方文档在上面部署Kubernetes。在本文的环境中,Kubernetes集群中所有node都挂载了一个[MFS](http://moosefs.org/)(Moose filesystem,一种分布式文件系统)共享目录,我们通过这个目录来存放训练文件与最终输出的模型。关于MFS的安装部署,可以参考[MooseFS documentation](https://moosefs.com/documentation.html)。在训练之前,用户将配置与训练数据切分好放在MFS目录中,训练时,程序从此目录拷贝文件到容器内进行训练,将结果保存到此目录里。整体的结构图如下: + +![paddle on kubernetes结构图](k8s-paddle-arch.png) + +上图描述了一个3节点的分布式训练场景,Kubernetes集群的每个node上都挂载了一个MFS目录,这个目录可以通过volume的形式挂载到容器中。Kubernetes为这次训练创建了3个pod并且调度到了3个node上运行,每个pod包含一个PaddlePaddle容器。在容器创建后,会启动pserver与trainer进程,读取volume中的数据进行这次分布式训练。 + +### 使用 Job + +我们使用Kubernetes中的job这个概念来代表一次分布式训练。Job表示一次性作业,在作业完成后,Kubernetes会销毁job产生的容器并且释放相关资源。 + +在Kubernetes中,可以通过编写一个YAML文件,来描述这个job,在这个文件中,主要包含了一些配置信息,例如PaddlePaddle的节点个数,`paddle pserver`开放的端口个数与端口号,使用的网卡设备等,这些信息通过环境变量的形式传递给容器内的程序使用。 + +在一次分布式训练中,用户确定好本次训练需要的PaddlePaddle节点个数,将切分好的训练数据与配置文件上传到MFS共享目录中。然后编写这次训练的job YAML文件,提交给Kubernetes集群创建并开始作业。 + +### 创建PaddlePaddle节点 + +当Kubernetes master收到请求,解析完YAML文件后,会创建出多个pod(个数为PaddlePaddle节点数),Kubernetes会把这些pod调度到集群的node上运行。一个pod就代表一个PaddlePaddle节点,当pod被成功分配到一台物理/虚拟机上后,Kubernetes会启动pod内的容器,这个容器会根据YAML文件中的环境变量,启动`paddle pserver`与`paddle train`进程。 + +### 启动训练 + +在容器启动后,会通过脚本来启动这次分布式训练,我们知道`paddle train`进程启动时需要知道其他节点的IP地址以及本节点的trainer_id,由于PaddlePaddle本身不提供类似服务发现的功能,所以在本文的启动脚本中,每个节点会根据job name向Kubernetes apiserver查询这个job对应的所有pod信息(Kubernetes默认会在每个容器的环境变量中写入apiserver的地址)。 + +根据这些pod信息,就可以通过某种方式,为每个pod分配一个唯一的trainer_id。本文把所有pod的IP地址进行排序,将顺序作为每个PaddlePaddle节点的trainer_id。启动脚本的工作流程大致如下: + + 1. 查询Kubernetes apiserver获取pod信息,根据IP分配trainer_id + 1. 从MFS共享目录中拷贝训练文件到容器内 + 1. 根据环境变量,解析出`paddle pserver`与`paddle train`的启动参数,启动进程 + 1. 训练时,PaddlePaddle会自动将结果保存在trainer_id为0的节点上,将输出路径设置为MFS目录,保存输出的文件 + + +## 搭建过程 + +根据前文的描述,要在已有的Kubernetes集群上进行PaddlePaddle的分布式训练,主要分为以下几个步骤: + +1. 制作PaddlePaddle镜像 +1. 将训练文件与切分好的数据上传到共享存储 +1. 编写本次训练的YAML文件,创建一个Kubernetes job +1. 
训练结束后查看输出结果 + +下面就根据这几个步骤分别介绍。 + + +### 制作镜像 + +PaddlePaddle镜像需要提供`paddle pserver`与`paddle train`进程的运行环境,用这个镜像创建的容器需要有以下两个功能: + +- 拷贝训练文件到容器内 + +- 生成`paddle pserver`与`paddle train`进程的启动参数,并且启动训练 + +因为官方镜像 `paddledev/paddle:cpu-latest` 内已经包含PaddlePaddle的执行程序但是还没上述功能,所以我们可以在这个基础上,添加启动脚本,制作新镜像来完成以上的工作。镜像的*Dockerfile*如下: + +```Dockerfile +FROM paddledev/paddle:cpu-latest + +MAINTAINER zjsxzong89@gmail.com + +COPY start.sh /root/ +COPY start_paddle.py /root/ +CMD ["bash"," -c","/root/start.sh"] +``` + +[`start.sh`](start.sh)文件拷贝训练文件到容器内,然后执行[`start_paddle.py`](start_paddle.py)脚本启动训练,前文提到的获取其他节点IP地址,分配`trainer_id`等都在`start_paddle.py`脚本中完成。 + +`start_paddle.py`脚本开始时,会先进行参数的初始化与解析。 + +```python +parser = argparse.ArgumentParser(prog="start_paddle.py", + description='simple tool for k8s') + args, train_args_list = parser.parse_known_args() + train_args = refine_unknown_args(train_args_list) + train_args_dict = dict(zip(train_args[:-1:2], train_args[1::2])) + podlist = getPodList() +``` + +然后通过函数`getPodList()`访问Kubernetes的接口来查询此job对应的所有pod信息。当所有pod都处于running状态(容器运行都运行)时,再通过函数`getIdMap(podlist)`获取trainer_id。 + +```python + podlist = getPodList() + # need to wait until all pods are running + while not isPodAllRunning(podlist): + time.sleep(10) + podlist = getPodList() + idMap = getIdMap(podlist) +``` + +在函数`getIdMap(podlist)`内部,我们通过读取`podlist`中每个pod的IP地址,将IP排序生成的序号作为trainer_id。 + +```python +def getIdMap(podlist): + ''' + generate tainer_id by ip + ''' + ips = [] + for pod in podlist["items"]: + ips.append(pod["status"]["podIP"]) + ips.sort() + idMap = {} + for i in range(len(ips)): + idMap[ips[i]] = i + return idMap +``` + +在得到`idMap`后,通过函数`startPaddle(idMap, train_args_dict)`构造`paddle pserver`与`paddle train`的启动参数并执行进程。 + +在函数`startPaddle`中,最主要的工作就是解析出`paddle pserver`与`paddle train`的启动参数。例如`paddle train`参数的解析,解析环境变量得到`PADDLE_NIC`,`PADDLE_PORT`,`PADDLE_PORTS_NUM`等参数,然后通过自身的IP地址在`idMap`中获取`trainerId`。 + +```python + program = 'paddle train' + args = " --nics=" + PADDLE_NIC + args += " --port=" + str(PADDLE_PORT) + args += " --ports_num=" + str(PADDLE_PORTS_NUM) + args += " --comment=" + "paddle_process_by_paddle" + ip_string = "" + for ip in idMap.keys(): + ip_string += (ip + ",") + ip_string = ip_string.rstrip(",") + args += " --pservers=" + ip_string + args_ext = "" + for key, value in train_args_dict.items(): + args_ext += (' --' + key + '=' + value) + localIP = socket.gethostbyname(socket.gethostname()) + trainerId = idMap[localIP] + args += " " + args_ext + " --trainer_id=" + \ + str(trainerId) + " --save_dir=" + JOB_PATH_OUTPUT +``` + +使用 `docker build` 构建镜像: + +```bash +docker build -t your_repo/paddle:mypaddle . +``` + +然后将构建成功的镜像上传到镜像仓库。 + +```bash +docker push your_repo/paddle:mypaddle +``` + +### 上传训练文件 + +本文使用PaddlePaddle官方的[recommendation demo](http://www.paddlepaddle.org/doc/demo/index.html#recommendation)作为这次训练的内容,我们将训练文件与数据放在一个job name命名的目录中,上传到MFS共享存储。完成后MFS上的文件内容大致如下: + +```bash +[root@paddle-kubernetes-node0 mfs]# tree -d +. 
+└── paddle-cluster-job + ├── data + │   ├── 0 + │   │ + │   ├── 1 + │   │ + │   └── 2 + ├── output + └── recommendation +``` + +目录中paddle-cluster-job是本次训练对应的job name,本次训练要求有3个PaddlePaddle节点,在paddle-cluster-job/data目录中存放切分好的数据,文件夹0,1,2分别代表3个节点的trainer_id。recommendation文件夹内存放训练文件,output文件夹存放训练结果与日志。 + +### 创建Job + +Kubernetes可以通过YAML文件来创建相关对象,然后可以使用命令行工具创建job。 + +Job YAML文件描述了这次训练使用的Docker镜像,需要启动的节点个数以及 `paddle pserver`与 `paddle train`进程启动的必要参数,也描述了容器需要使用的存储卷挂载的情况。YAML文件中各个字段的具体含义,可以查看[Kubernetes Job API](http://kubernetes.io/docs/api-reference/batch/v1/definitions/#_v1_job)。例如,本次训练的YAML文件可以写成: + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: paddle-cluster-job +spec: + parallelism: 3 + completions: 3 + template: + metadata: + name: paddle-cluster-job + spec: + volumes: + - name: jobpath + hostPath: + path: /home/work/mfs + containers: + - name: trainer + image: your_repo/paddle:mypaddle + command: ["bin/bash", "-c", "/root/start.sh"] + env: + - name: JOB_NAME + value: paddle-cluster-job + - name: JOB_PATH + value: /home/jobpath + - name: JOB_NAMESPACE + value: default + - name: TRAIN_CONFIG_DIR + value: recommendation + - name: CONF_PADDLE_NIC + value: eth0 + - name: CONF_PADDLE_PORT + value: "7164" + - name: CONF_PADDLE_PORTS_NUM + value: "2" + - name: CONF_PADDLE_PORTS_NUM_SPARSE + value: "2" + - name: CONF_PADDLE_GRADIENT_NUM + value: "3" + volumeMounts: + - name: jobpath + mountPath: /home/jobpath + restartPolicy: Never +``` + +文件中,`metadata`下的`name`表示这个job的名字。`parallelism,completions`字段表示这个job会同时开启3个PaddlePaddle节点,成功训练且退出的pod数目为3时,这个job才算成功结束。然后申明一个存储卷`jobpath`,代表宿主机目录`/home/work/mfs`,在对容器的描述`containers`字段中,将此目录挂载为容器的`/home/jobpath`目录,这样容器的`/home/jobpath`目录就成为了共享存储,放在这个目录里的文件其实是保存到了MFS上。 + +`env`字段表示容器的环境变量,我们将`paddle`运行的一些参数通过这种方式传递到容器内。 + +`JOB_PATH`表示共享存储挂载的路径,`JOB_NAME`表示job名字,`TRAIN_CONFIG_DIR`表示本次训练文件所在目录,这三个变量组合就可以找到本次训练需要的文件路径。 + +`CONF_PADDLE_NIC`表示`paddle pserver`进程需要的`--nics`参数,即网卡名 + +`CONF_PADDLE_PORT`表示`paddle pserver`的`--port`参数,`CONF_PADDLE_PORTS_NUM`则表示稠密更新的端口数量,也就是`--ports_num`参数。 + +`CONF_PADDLE_PORTS_NUM_SPARSE`表示稀疏更新的端口数量,也就是`--ports_num_for_sparse`参数。 + +`CONF_PADDLE_GRADIENT_NUM`表示训练节点数量,即`--num_gradient_servers`参数 + +编写完YAML文件后,可以使用Kubernetes的命令行工具创建job。 + +```bash +kubectl create -f job.yaml +``` + +创建成功后,Kubernetes就会创建3个pod作为PaddlePaddle节点然后拉取镜像,启动容器开始训练。 + + +### 查看输出 + +在训练过程中,可以在共享存储上查看输出的日志和模型,例如output目录下就存放了输出结果。注意node_0,node_1,node_2这几个目录表示PaddlePaddle节点与trainer_id,并不是Kubernetes中的node概念。 + +```bash +[root@paddle-kubernetes-node0 output]# tree -d +. +├── node_0 +│   ├── server.log +│   └── train.log +├── node_1 +│   ├── server.log +│   └── train.log +├── node_2 +...... +├── pass-00002 +│   ├── done +│   ├── ___embedding_0__.w0 +│   ├── ___embedding_1__.w0 +...... +``` + +我们可以通过日志查看容器训练的情况,例如: + +```bash +[root@paddle-kubernetes-node0 node_0]# cat train.log +I1116 09:10:17.123121 50 Util.cpp:155] commandline: + /usr/local/bin/../opt/paddle/bin/paddle_trainer + --nics=eth0 --port=7164 + --ports_num=2 --comment=paddle_process_by_paddle + --pservers=192.168.129.66,192.168.223.143,192.168.129.71 + --ports_num_for_sparse=2 --config=./trainer_config.py + --trainer_count=4 --num_passes=10 --use_gpu=0 + --log_period=50 --dot_period=10 --saving_period=1 + --local=0 --trainer_id=0 + --save_dir=/home/jobpath/paddle-cluster-job/output +I1116 09:10:17.123440 50 Util.cpp:130] Calling runInitFunctions +I1116 09:10:17.123764 50 Util.cpp:143] Call runInitFunctions done. 
+
+
+### Inspecting the output
+
+While training runs, the logs and models it writes can be inspected on the shared storage; the `output` directory, for example, holds the results. Note that the directories `node_0`, `node_1`, `node_2` refer to PaddlePaddle nodes and their trainer_id, not to the Kubernetes notion of a node.
+
+```bash
+[root@paddle-kubernetes-node0 output]# tree -d
+.
+├── node_0
+│   ├── server.log
+│   └── train.log
+├── node_1
+│   ├── server.log
+│   └── train.log
+├── node_2
+......
+├── pass-00002
+│   ├── done
+│   ├── ___embedding_0__.w0
+│   ├── ___embedding_1__.w0
+......
+```
+
+The logs show how training is going inside each container, for example:
+
+```bash
+[root@paddle-kubernetes-node0 node_0]# cat train.log
+I1116 09:10:17.123121    50 Util.cpp:155] commandline:
+ /usr/local/bin/../opt/paddle/bin/paddle_trainer
+    --nics=eth0 --port=7164
+    --ports_num=2 --comment=paddle_process_by_paddle
+    --pservers=192.168.129.66,192.168.223.143,192.168.129.71
+    --ports_num_for_sparse=2 --config=./trainer_config.py
+    --trainer_count=4 --num_passes=10 --use_gpu=0
+    --log_period=50 --dot_period=10 --saving_period=1
+    --local=0 --trainer_id=0
+    --save_dir=/home/jobpath/paddle-cluster-job/output
+I1116 09:10:17.123440    50 Util.cpp:130] Calling runInitFunctions
+I1116 09:10:17.123764    50 Util.cpp:143] Call runInitFunctions done.
+[WARNING 2016-11-16 09:10:17,227 default_decorators.py:40] please use keyword arguments in paddle config.
+[INFO 2016-11-16 09:10:17,239 networks.py:1282] The input order is [movie_id, title, genres, user_id, gender, age, occupation, rating]
+[INFO 2016-11-16 09:10:17,239 networks.py:1289] The output order is [__regression_cost_0__]
+I1116 09:10:17.392917    50 Trainer.cpp:170] trainer mode: Normal
+I1116 09:10:17.613910    50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process
+I1116 09:10:17.680917    50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process
+I1116 09:10:17.681543    50 GradientMachine.cpp:134] Initing parameters..
+I1116 09:10:18.012390    50 GradientMachine.cpp:141] Init parameters done.
+I1116 09:10:18.018641    50 ParameterClient2.cpp:122] pserver 0 192.168.129.66:7164
+I1116 09:10:18.018950    50 ParameterClient2.cpp:122] pserver 1 192.168.129.66:7165
+I1116 09:10:18.019069    50 ParameterClient2.cpp:122] pserver 2 192.168.223.143:7164
+I1116 09:10:18.019492    50 ParameterClient2.cpp:122] pserver 3 192.168.223.143:7165
+I1116 09:10:18.019716    50 ParameterClient2.cpp:122] pserver 4 192.168.129.71:7164
+I1116 09:10:18.019836    50 ParameterClient2.cpp:122] pserver 5 192.168.129.71:7165
+```
\ No newline at end of file
diff --git a/doc_cn/cluster/k8s/job.yaml b/doc_cn/cluster/k8s/job.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1e0ac464b2ec71e98c28f090124690b01b0755ce
--- /dev/null
+++ b/doc_cn/cluster/k8s/job.yaml
@@ -0,0 +1,43 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: paddle-cluster-job
+spec:
+  parallelism: 3
+  completions: 3
+  template:
+    metadata:
+      name: paddle-cluster-job
+    spec:
+      volumes:
+      - name: jobpath
+        hostPath:
+          path: /home/work/paddle_output
+      containers:
+      - name: trainer
+        image: registry.baidu.com/public/paddle:mypaddle
+        command: ["/bin/bash", "-c", "/root/start.sh"]
+        env:
+        - name: JOB_NAME
+          value: paddle-cluster-job
+        - name: JOB_PATH
+          value: /home/jobpath
+        - name: JOB_NAMESPACE
+          value: default
+        - name: TRAIN_CONFIG_DIR
+          value: recommendation
+        - name: CONF_PADDLE_NIC
+          value: eth0
+        - name: CONF_PADDLE_PORT
+          value: "7164"
+        - name: CONF_PADDLE_PORTS_NUM
+          value: "2"
+        - name: CONF_PADDLE_PORTS_NUM_SPARSE
+          value: "2"
+        - name: CONF_PADDLE_GRADIENT_NUM
+          value: "3"
+        volumeMounts:
+        - name: jobpath
+          mountPath: /home/jobpath
+      restartPolicy: Never
\ No newline at end of file
diff --git a/doc_cn/cluster/k8s/k8s-paddle-arch.png b/doc_cn/cluster/k8s/k8s-paddle-arch.png
new file mode 100644
index 0000000000000000000000000000000000000000..a8c64550b1fa7f41de1eaa9a037c65cddc0cd30e
Binary files /dev/null and b/doc_cn/cluster/k8s/k8s-paddle-arch.png differ
diff --git a/doc_cn/cluster/k8s/start.sh b/doc_cn/cluster/k8s/start.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b3a1334174a20b018d35de3b01b149fc5b10d49d
--- /dev/null
+++ b/doc_cn/cluster/k8s/start.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+set -eu
+
+jobconfig=${JOB_PATH}"/"${JOB_NAME}"/"${TRAIN_CONFIG_DIR}
+cd /root
+cp -rf $jobconfig .
+cd $TRAIN_CONFIG_DIR
+
+
+python /root/start_paddle.py \
+  --dot_period=10 \
+  --ports_num_for_sparse=$CONF_PADDLE_PORTS_NUM_SPARSE \
+  --log_period=50 \
+  --num_passes=10 \
+  --trainer_count=4 \
+  --saving_period=1 \
+  --local=0 \
+  --config=./trainer_config.py \
+  --use_gpu=0
diff --git a/doc_cn/cluster/k8s/start_paddle.py b/doc_cn/cluster/k8s/start_paddle.py
new file mode 100755
index 0000000000000000000000000000000000000000..bc0112a77fb84db8965a09716006377c127ad4db
--- /dev/null
+++ b/doc_cn/cluster/k8s/start_paddle.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+import time
+import socket
+import os
+import argparse
+
+
+# configuration for cluster
+API = "/api/v1/namespaces/"
+JOBSELECTOR = "labelSelector=job-name="
+JOB_PATH = os.getenv("JOB_PATH") + "/" + os.getenv("JOB_NAME")
+JOB_PATH_DATA = JOB_PATH + "/data"
+JOB_PATH_OUTPUT = JOB_PATH + "/output"
+JOBNAME = os.getenv("JOB_NAME")
+NAMESPACE = os.getenv("JOB_NAMESPACE")
+PADDLE_NIC = os.getenv("CONF_PADDLE_NIC")
+PADDLE_PORT = os.getenv("CONF_PADDLE_PORT")
+PADDLE_PORTS_NUM = os.getenv("CONF_PADDLE_PORTS_NUM")
+PADDLE_PORTS_NUM_SPARSE = os.getenv("CONF_PADDLE_PORTS_NUM_SPARSE")
+PADDLE_SERVER_NUM = os.getenv("CONF_PADDLE_GRADIENT_NUM")
+
+
+def refine_unknown_args(cmd_args):
+    '''
+    refine unknown parameters to handle some special parameters
+    '''
+    new_args = []
+    for arg in cmd_args:
+        if arg.startswith("--") and arg.find("=") != -1:
+            equal_pos = arg.find("=")  # find first = pos
+            arglist = list(arg)
+            arglist[equal_pos] = " "
+            arg = "".join(arglist)
+            arg = arg.lstrip("-")
+            new_args += arg.split(" ")
+        elif arg.startswith("--") and arg.find("=") == -1:
+            arg = arg.lstrip("-")
+            new_args.append(arg)
+        else:
+            new_args.append(arg)
+    return new_args
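+
+# For example (illustrative): refine_unknown_args(["--num_passes=10", "--use_gpu", "0"])
+# returns ['num_passes', '10', 'use_gpu', '0']; the caller then pairs the tokens
+# with dict(zip(train_args[:-1:2], train_args[1::2])), yielding
+# {'num_passes': '10', 'use_gpu': '0'}.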
+
+
+def isPodAllRunning(podlist):
+    '''
+    check whether all pods are running
+    '''
+    require = len(podlist["items"])
+    running = 0
+    for pod in podlist["items"]:
+        if pod["status"]["phase"] == "Running":
+            running += 1
+    if require == running:
+        return True
+    return False
+
+
+def getPodList():
+    '''
+    get all container status of the job
+    '''
+    apiserver = "https://" + \
+        os.getenv("KUBERNETES_SERVICE_HOST") + ":" + \
+        os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")
+
+    pod = API + NAMESPACE + "/pods?"
+    job = JOBNAME
+    return requests.get(apiserver + pod + JOBSELECTOR + job,
+                        verify=False).json()
+
+
+def getIdMap(podlist):
+    '''
+    generate trainer_id by ip
+    '''
+    ips = []
+    for pod in podlist["items"]:
+        ips.append(pod["status"]["podIP"])
+    ips.sort()
+    idMap = {}
+    for i in range(len(ips)):
+        idMap[ips[i]] = i
+    return idMap
+
+
+def startPaddle(idMap={}, train_args_dict=None):
+    '''
+    start paddle pserver and trainer
+    '''
+    program = 'paddle train'
+    args = " --nics=" + PADDLE_NIC
+    args += " --port=" + str(PADDLE_PORT)
+    args += " --ports_num=" + str(PADDLE_PORTS_NUM)
+    args += " --comment=" + "paddle_process_by_paddle"
+    ip_string = ""
+    for ip in idMap.keys():
+        ip_string += (ip + ",")
+    ip_string = ip_string.rstrip(",")
+    args += " --pservers=" + ip_string
+    args_ext = ""
+    for key, value in train_args_dict.items():
+        args_ext += (' --' + key + '=' + value)
+    localIP = socket.gethostbyname(socket.gethostname())
+    trainerId = idMap[localIP]
+    args += " " + args_ext + " --trainer_id=" + \
+        str(trainerId) + " --save_dir=" + JOB_PATH_OUTPUT
+    logDir = JOB_PATH_OUTPUT + "/node_" + str(trainerId)
+    if not os.path.exists(JOB_PATH_OUTPUT):
+        os.makedirs(JOB_PATH_OUTPUT)
+    os.mkdir(logDir)
+    copyCommand = 'cp -rf ' + JOB_PATH_DATA + \
+        "/" + str(trainerId) + " ./data"
+    os.system(copyCommand)
+    startPserver = 'nohup paddle pserver' + \
+        " --port=" + str(PADDLE_PORT) + \
+        " --ports_num=" + str(PADDLE_PORTS_NUM) + \
+        " --ports_num_for_sparse=" + str(PADDLE_PORTS_NUM_SPARSE) + \
+        " --nics=" + PADDLE_NIC + \
+        " --comment=" + "paddle_process_by_paddle" + \
+        " --num_gradient_servers=" + str(PADDLE_SERVER_NUM) +\
+        " > " + logDir + "/server.log 2>&1 &"
+    print startPserver
+    os.system(startPserver)
+    # wait until pservers completely start
+    time.sleep(10)
+    startTrainer = program + args + " > " + \
+        logDir + "/train.log 2>&1 < /dev/null"
+    print startTrainer
+    os.system(startTrainer)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(prog="start_paddle.py",
+                                     description='simple tool for k8s')
+    args, train_args_list = parser.parse_known_args()
+    train_args = refine_unknown_args(train_args_list)
+    train_args_dict = dict(zip(train_args[:-1:2], train_args[1::2]))
+    podlist = getPodList()
+    # need to wait until all pods are running
+    while not isPodAllRunning(podlist):
+        time.sleep(10)
+        podlist = getPodList()
+    idMap = getIdMap(podlist)
+    startPaddle(idMap, train_args_dict)
diff --git a/doc_cn/demo/quick_start/index.md b/doc_cn/demo/quick_start/index.md
index 4d9b24ba851a7aaaeb0d79bfbeb0703b8878b77f..4a6e07ee1ffd94cf8f781af307b53a96a78e6b93 100644
--- a/doc_cn/demo/quick_start/index.md
+++ b/doc_cn/demo/quick_start/index.md
@@ -32,13 +32,11 @@
 ## Data Preparation
 For this task we use the [Amazon electronics review data](http://jmcauley.ucsd.edu/data/amazon/)
-and classify reviews into positive and negative samples. The `demo/quick_start` directory of the [source tree](https://github.com/baidu/Paddle) provides scripts for downloading the data and for preprocessing it.
+and classify reviews into positive and negative samples. The `demo/quick_start` directory of the [source tree](https://github.com/PaddlePaddle/Paddle) provides a script that downloads the already preprocessed data (to start from the raw data instead, use the script `./demo/quick_start/data/proc_from_raw_data/get_data.sh`).

 ```bash
 cd demo/quick_start
 ./data/get_data.sh
-./preprocess.sh
 ```

 ## Transfer Data to Model
@@ -143,7 +141,7 @@ PyDataProvider2.
 We start from a basic logistic regression network and gradually introduce deeper features. For detailed network configuration, see the Layer documentation.
-All configurations live in the `demo/quick_start` directory of the [source tree](https://github.com/baidu/Paddle); we list the logistic regression network first.
+All configurations live in the `demo/quick_start` directory of the [source tree](https://github.com/PaddlePaddle/Paddle); we list the logistic regression network first.

 ### Logistic Regression
diff --git a/doc_cn/faq/index.rst b/doc_cn/faq/index.rst
index 3eb0e10ae2228740cd384270db5070e367f7007b..551430eb41765673700b7c6568e4b483641f2cac 100644
--- a/doc_cn/faq/index.rst
+++ b/doc_cn/faq/index.rst
@@ -4,22 +4,18 @@ PaddlePaddle FAQ

 .. contents::

-1. How to reduce PaddlePaddle's memory footprint
+1. How to reduce the memory footprint
 ------------------------------------------------

-Training a neural network is inherently memory-hungry, on both host and GPU. It routinely consumes tens of GB of RAM and several GB of GPU memory.
+Training a neural network is inherently memory-hungry, on both host and GPU, routinely consuming tens of GB of RAM and several GB of GPU memory.
 PaddlePaddle's memory usage falls into the following categories\:

-* DataProvider buffer pool (host memory only)
-* neuron activations (host and GPU memory)
-* parameters (host and GPU memory)
+* DataProvider buffer pool (host memory only)
+* neuron activations (host and GPU memory)
+* parameters (host and GPU memory)
 * miscellaneous

-Miscellaneous memory means memory PaddlePaddle itself uses, such as string allocations and temporary variables; how to shrink it is not considered here.
-
-The other categories can be reduced as follows:
-
+Miscellaneous memory means memory PaddlePaddle itself uses, such as string allocations and temporary variables, and is not considered here.

 Reducing the DataProvider buffer pool
 +++++++++++++++++++++++++++++++++++++
@@ -39,28 +35,28 @@ PyDataProvider loads data asynchronously and samples directly at random from the in-memory pool

 .. literalinclude:: reduce_min_pool_size.py

-This greatly reduces memory usage and may speed up training. For details see `here
+This greatly reduces memory usage and may also speed up training; for details see `here
 <../ui/data_provider/pydataprovider2.html#provider>`_ .

 Neuron activation memory
 ++++++++++++++++++++++++

-During training the network buffers some data for every activation, including activations and residuals.
+During training the network buffers some data for every activation, such as the neuron activation values.
 During the backward pass these buffers are used to update the parameters. Their size depends on two
 settings, the batch size and the length of each sequence, so it is essentially proportional to the
 number of timesteps contained in each mini-batch.

-There are therefore two remedies. They are
+There are therefore two remedies:

 * Reduce the batch size, i.e. set :code:`settings(batch_size=1000)` in the network configuration to a
   smaller value. Note that batch size is a hyper-parameter of the network, and shrinking it may affect
   the training results.
 * Shorten the sequences, or simply drop extremely long ones. If most sequences in a dataset are
-  100-200 items long, a single sequence of length 10000 can easily exhaust memory. This is especially true in RNNs such as LSTMs.
+  100-200 items long, a single sequence of length 10000 can easily exhaust memory, especially in RNNs such as LSTMs.

 Parameter memory
 ++++++++++++++++

 PaddlePaddle supports many optimizers, and different optimizers need different amounts of memory.
-For example the :code:`adadelta` algorithm needs roughly 5x the parameter size. If the saved parameter
+For example the :code:`adadelta` algorithm needs memory equal to roughly 5x the size of the weight parameters. So if the saved model directory
 file is :code:`100M`, that optimizer needs at least :code:`500M` of memory.

 Consider using a lighter optimizer such as :code:`momentum`.

@@ -68,11 +64,11 @@
 2. How to speed up PaddlePaddle training
 ----------------------------------------

-PaddlePaddle is a neural-network training platform; training can be accelerated along the following lines\:
+Accelerating PaddlePaddle training can be approached along the following lines\:

 * reduce data-loading overhead
 * speed up the training itself
-* use more computing resources
+* harness more computing resources through distributed training

 Reducing data-loading overhead
 ++++++++++++++++++++++++++++++
@@ -108,25 +104,20 @@ PaddlePaddle supports sparse training; sparse training requires the features to be :code:`spa

 Harnessing more computing resources can be broken down as follows\:

 * single-machine CPU training
-  * multi-threaded training: set the command-line flag :code:`trainer_count` to choose the number of training threads, e.g. :code:`paddle train --trainer_count=4`
+  * multi-threaded training: set the command-line flag :code:`trainer_count`.
+
 * single-machine GPU training
-  * GPU training: set the command-line flag :code:`use_gpu`, e.g. :code:`paddle train --use_gpu=true`
-  * multi-GPU training: set the command-line flags :code:`use_gpu` and :code:`trainer_count`; :code:`--use_gpu=True` enables GPU training and :code:`trainer_count` sets the number of GPUs, e.g. :code:`paddle train --use_gpu=true --trainer_count=4`
+  * GPU training: set the command-line flag :code:`use_gpu`.
+  * multi-GPU training: set the command-line flags :code:`use_gpu` and :code:`trainer_count`.
+
 * multi-machine training
-  * multi-machine training is also straightforward: first start :code:`paddle pserver` on every node, then point the trainers at them with :code:`paddle train --pservers=192.168.100.1,192.168.100.2`
-  * see the `cluster training `_ documentation for the details.
+  * see the `cluster training documentation <../ui/data_provider/pydataprovider2.html#provider>`_ for the details.

 3. Hitting "illegal instruction" errors
 ---------------------------------------

-To speed up its computation, paddle uses AVX instructions, which some older CPU models do not support. Usually, running ``grep avx /proc/cpuinfo`` and checking for output tells you whether AVX is available. (Note: some virtual machines report AVX support yet crash when it is actually used; treat those as unsupported and apply the fixes below.)
-
-The fixes are\:
-
-* use a NO_AVX `installation package <../build_and_install/index.html>`_ or `Docker image <../build_and_install/install/docker_install.html>`_
-* or rebuild PaddlePaddle with :code:`-DWITH_AVX=OFF`.
-
+PaddlePaddle uses the avx SIMD instructions to improve cpu efficiency, so running the wrong binary release can trigger this error; please pick the release that matches your machine.

 4. How to choose the SGD learning rate
 --------------------------------------
@@ -158,7 +149,7 @@ To speed up its computation, paddle uses avx instructions.
 6. How to share parameters
 --------------------------

-PaddlePaddle parameters are identified by their :code:`name`; parameters with the same name are shared. A parameter's name can be set with :code:`ParamAttr(name="YOUR_PARAM_NAME")`. A more convenient way is to want the shared parameters to use the same :code:`ParamAttr` object.
+PaddlePaddle parameters are identified by their :code:`name`; parameters with the same name are shared. A parameter's name can be set with :code:`ParamAttr(name="YOUR_PARAM_NAME")`. A more convenient way is to have the parameters that should be shared use the same :code:`ParamAttr` object.

 A configuration example of parameter sharing in a simple fully connected network\:
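+
+As a minimal sketch of the idea (assuming the usual trainer_config_helpers API; the layer names and sizes are illustrative):
+
+.. code-block:: python
+
+    from paddle.trainer_config_helpers import *
+
+    data = data_layer(name="input", size=256)
+    # both layers reference the same ParamAttr, so they share one weight matrix
+    shared_weight = ParamAttr(name="fc_shared.w")
+    hidden_a = fc_layer(input=data, size=128, param_attr=shared_weight)
+    hidden_b = fc_layer(input=data, size=128, param_attr=shared_weight)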
@@ -208,9 +199,6 @@ PaddlePaddle parameters are identified by their :code:`name`
     paddle package is already in your PYTHONPATH. But unittest need a clean environment. Please uninstall paddle package before start unittest. Try to 'pip uninstall paddle'.

-The fix: uninstall the paddle package with :code:`pip uninstall paddle`.
-
-The cause: the unit tests picked up an old installed python package rather than the freshly modified code; the unit tests need a clean environment:
+The fix is:

-* If a paddle package is already in python's site-packages, the unit tests import that package instead of the one under the :code:`/python` directory of the source tree.
-* Setting :code:`PYTHONPATH` to :code:`/python` does not help either, because python prefers already-installed packages on its search path.
\ No newline at end of file
+* Uninstall the PaddlePaddle package with :code:`pip uninstall paddle` to clear out stale installs and give the unit tests a clean environment. If a PaddlePaddle package is already in python's site-packages, the unit tests import that package instead of the one under the :code:`/python` directory of the source tree; setting :code:`PYTHONPATH` to :code:`/python` does not help either, because python prefers already-installed packages on its search path.
diff --git a/doc_cn/ui/cmd/dump_config.rst b/doc_cn/ui/cmd/dump_config.rst
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/doc_cn/ui/cmd/index.rst b/doc_cn/ui/cmd/index.rst
index 6d62180a6a5e3f2490cccd2a90213050aa3c172e..31a8b8a79f4a87101bd6030eb4e779fd11d65811 100644
--- a/doc_cn/ui/cmd/index.rst
+++ b/doc_cn/ui/cmd/index.rst
@@ -1,24 +1,20 @@
-Command-line arguments
-======================
+Commands
+========

-The installed PaddlePaddle ships several commands; they are
+Once PaddlePaddle is installed, typing ``paddle`` or ``paddle --help`` on the command line lists the following commands.

-* paddle train, the PaddlePaddle training process. It can run single-machine, multi-GPU, multi-threaded training on its own, or be combined with paddle pserver for multi-machine training.
-* paddle pserver, PaddlePaddle's parameter server process, responsible for aggregating parameters in multi-machine training.
-* paddle version, which prints PaddlePaddle's version and build information.
-* merge_model, which packs a PaddlePaddle model and its configuration into one file for easy deployment and distribution.
-* dump_config, which prints a PaddlePaddle training model as a proto string.
-* make_diagram, which draws the PaddlePaddle network model with graphviz, handy for debugging.
+* ``train``  Start a paddle_trainer
+    Starts a PaddlePaddle training process. ``paddle train`` can launch a single-machine training process via the command-line flag ``-local=true``, or be combined with ``paddle pserver`` to launch distributed multi-machine training.
+* ``pserver``  Start a paddle_pserver_main
+    Starts PaddlePaddle's parameter server process for distributed multi-machine training.
+* ``version``  Print paddle version
+    Prints the current PaddlePaddle version and build options. A typical output looks as follows: 1) the first line gives the PaddlePaddle version; 2) the following lines list the main build options, whose meaning is documented in the `build options file <../../build_and_install/cmake/compile_options.html>`_ .

-For details, see the command-line argument documentation of each command.
+  .. literalinclude:: paddle_version.txt

-.. toctree::
-  :glob:
-
-  paddle_train.rst
-  paddle_pserver.rst
-  paddle_version.rst
-  merge_model.rst
-  dump_config.rst
-  make_diagram.rst
+* ``merge_model``  Start a paddle_merge_model
+    Packs PaddlePaddle model parameter files and the model configuration file into a single file, for easy deployment and distribution.
+* ``dump_config``  Dump the trainer config as proto string
+    Prints a PaddlePaddle model configuration file as a proto string.
+* ``make_diagram``
+    Draws a PaddlePaddle model configuration file with graphviz.
\ No newline at end of file
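For a quick sense of the commands listed above, typical invocations look like this (the config file name and thread count are illustrative):

```bash
# single-machine training with 4 threads
paddle train --local=true --config=trainer_config.py --trainer_count=4
# print version and build options
paddle version
```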
diff --git a/doc_cn/ui/cmd/make_diagram.rst b/doc_cn/ui/cmd/make_diagram.rst
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/doc_cn/ui/cmd/merge_model.rst b/doc_cn/ui/cmd/merge_model.rst
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/doc_cn/ui/cmd/paddle_pserver.rst b/doc_cn/ui/cmd/paddle_pserver.rst
deleted file mode 100644
index 891975c34af5c34dddc754b79bd3e1adda9d9671..0000000000000000000000000000000000000000
--- a/doc_cn/ui/cmd/paddle_pserver.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-Command-line arguments of paddle pserver
-========================================
diff --git a/doc_cn/ui/cmd/paddle_train.rst b/doc_cn/ui/cmd/paddle_train.rst
deleted file mode 100644
index 87b84f5cbdbbe016d9bcdbda2cb30d93d2ad8022..0000000000000000000000000000000000000000
--- a/doc_cn/ui/cmd/paddle_train.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-Command-line arguments of paddle train
-======================================
diff --git a/doc_cn/ui/cmd/paddle_version.rst b/doc_cn/ui/cmd/paddle_version.rst
deleted file mode 100644
index 0a4f8dd472a6009ef6832df75be043c24bb32ba0..0000000000000000000000000000000000000000
--- a/doc_cn/ui/cmd/paddle_version.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-Command-line arguments of paddle version
-========================================
-
-paddle version prints paddle's version information and build options. A typical output looks like
-
-.. literalinclude:: paddle_version.txt
-
-The first line states the paddle version, followed by a series of build options; see paddle's
-`build options file <../../build/cmake/compile_options.html>`_
diff --git a/doc_cn/ui/index.rst b/doc_cn/ui/index.rst
index 5aba272c627204110a56337f0f120f3f2cd37ae9..d871ad805ff7cd37fb83f24024003e54bce77f42 100644
--- a/doc_cn/ui/index.rst
+++ b/doc_cn/ui/index.rst
@@ -1,8 +1,9 @@
+##############
 User Interface
-==============
+##############

 Data Provider
-'''''''''''''
+=============

 .. toctree::
    :maxdepth: 1

    data_provider/index.rst

-Command-line arguments
-''''''''''''''''''''''
-* `Use Case <../../doc/ui/cmd_argument/use_case.html>`_
-* `Argument Outline <../../doc/ui/cmd_argument/argument_outline.html>`_
-* `Detail Description <../../doc/ui/cmd_argument/detail_introduction.html>`_
+Commands and command-line arguments
+===================================
+
+.. toctree::
+   :maxdepth: 1
+
+   cmd/index.rst
+
+* `Use cases <../../doc/ui/cmd_argument/use_case.html>`_
+* `Argument outline <../../doc/ui/cmd_argument/argument_outline.html>`_
+* `Argument details <../../doc/ui/cmd_argument/detail_introduction.html>`_

 Prediction
-''''''''''
+==========

 .. toctree::
+   :maxdepth: 1

    predict/swig_py_paddle.rst
diff --git a/doc_cn/ui/predict/swig_py_paddle.rst b/doc_cn/ui/predict/swig_py_paddle.rst
index 012ac4ff6e66a022fa7d8af798236f55b62011ec..89031dd72f5065b6919d873f5611a5e94e8b62e3 100644
--- a/doc_cn/ui/predict/swig_py_paddle.rst
+++ b/doc_cn/ui/predict/swig_py_paddle.rst
@@ -1,38 +1,36 @@
-PaddlePaddle's Python prediction API
-====================================
+Python-based prediction
+=======================

-PaddlePaddle wraps its common prediction interfaces with Swig, which makes prediction from a Python environment much simpler.
-Prediction in Python consists of the following steps.
+Prediction workflow
+-------------------

-* read and parse the training configuration
-* construct a GradientMachine
-* prepare the data
-* predict
+PaddlePaddle wraps its common prediction interfaces with swig; the build produces the py_paddle package, and once it is installed, model prediction can be done from python. Use python's ``help()`` to look up the package's API documentation.

-A typical prediction program is shown below, using mnist handwritten-digit recognition as the example; the complete code is in
-:code:`src_root/doc/ui/predict/predict_sample.py` .
+Python-based model prediction consists of the following five steps.
+
+1. Initialize the PaddlePaddle environment
+   At program start, call ``swig_paddle.initPaddle()`` with the relevant command-line flags to initialize PaddlePaddle.
+2. Parse the model configuration file
+   After initialization, the configuration used to train the model can be parsed by calling ``parse_config()``. Note that prediction data usually carries no label, and a prediction network usually outputs its last layer directly instead of appending a cost layer as in training, so the training configuration typically needs small adjustments before it can be used for prediction.
+3. Construct paddle.GradientMachine
+   Calling ``swig_paddle.GradientMachine.createFromConfigProto()`` with the model configuration parsed in the previous step creates a ``GradientMachine``.
+4. Prepare the input data
+   The prediction interfaces in swig_paddle take custom C++ data types as arguments; py_paddle provides the helper class ``DataProviderConverter``, which accepts the same input format as PyDataProvider2 and converts it into the types the prediction interface needs.
+5. Run the prediction
+   Calling ``forwardTest()`` with the prepared data returns the computed results directly.
+
+
+Prediction demo
+---------------
+
+Below is a snippet that uses an mnist model for handwriting recognition. The complete code is in ``src_root/doc/ui/predict/predict_sample.py``; the mnist model can be trained with the demo under ``src_root\demo\mnist``.

 .. literalinclude:: ../../../doc/ui/predict/predict_sample.py
     :language: python
-    :lines: 15-18,90-100,101-104
-
-The main package is py_paddle.swig_paddle, which is comparatively well documented; use python's
-:code:`help()` function to look things up. The main steps are:
-
-* At program start, call :code:`swig_paddle.initPaddle()` with command-line flags to initialize
-  PaddlePaddle; the flags are documented under
-  `command-line arguments <../cmd_argument/detail_introduction.html>`_ .
-* Next, call :code:`parse_config()` to parse the training-time configuration. Prediction data usually
-  carries no label, and a prediction network usually outputs its last layer directly instead of a cost
-  layer as in training, so the configuration used for prediction needs matching edits.
-* Call :code:`swig_paddle.GradientMachine.createFromConfigProto()` to build the neural network from the
-  parsed configuration.
-* Create a :code:`DataProviderConverter` object, converter.
-  The raw data swig_paddle accepts is a C++ Matrix, i.e. a float array written directly to memory,
-  which is not user friendly; the helper class DataProviderConverter is therefore provided. It accepts
-  the same input data as PyDataProvider2; see the
-  `PyDataProvider2 documentation <../../../doc/ui/data_provider/pydataprovider2.html>`_ .
-* Finally, call :code:`forwardTest()` to extract the output of the network's output layer. A typical result is\:
+    :lines: 15-18,121-136
+
+
+The demo's prediction output is shown below, where ``value`` is the output of the softmax layer. Since TEST_DATA holds two samples, ``value`` contains two vectors.

 .. code-block:: text

@@ -45,4 +43,4 @@
          2.70634608e-08,   3.48565123e-08,   5.25639710e-09,
          4.48684503e-08]], dtype=float32)}]

-Here ``value`` is the output of the softmax layer; since there are two input samples, ``value`` contains two vectors.
+
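+A minimal sketch of the five steps (assuming a built and installed py_paddle package; the config path, parameter directory, and input dimension are placeholders):
+
+.. code-block:: python
+
+    from py_paddle import swig_paddle, DataProviderConverter
+    from paddle.trainer.PyDataProvider2 import dense_vector
+    from paddle.trainer.config_parser import parse_config
+
+    # 1. initialize PaddlePaddle
+    swig_paddle.initPaddle("--use_gpu=0")
+    # 2. parse the prediction-ready model configuration
+    conf = parse_config("trainer_config.py", "is_predict=1")
+    # 3. create the GradientMachine and load the trained parameters
+    network = swig_paddle.GradientMachine.createFromConfigProto(conf.model_config)
+    network.loadParameters("./output/pass-00099")
+    # 4. convert one 784-dim input sample into the prediction data type
+    converter = DataProviderConverter([dense_vector(784)])
+    arguments = converter([[784 * [0.0]]])
+    # 5. run the forward pass and fetch the output layer's results
+    outputs = network.forwardTest(arguments)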
diff --git a/paddle/cuda/include/hl_cuda.h b/paddle/cuda/include/hl_cuda.h
index 357286e3188a6f3184bc56e75232bf2e1ec54e44..2c7d665101f36f9c32ab132ca279abf3ac062a8f 100644
--- a/paddle/cuda/include/hl_cuda.h
+++ b/paddle/cuda/include/hl_cuda.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #ifndef HL_CUDA_H_
 #define HL_CUDA_H_

-#include "hl_base.h"
 #include
+#include
+#include "hl_base.h"

 /**
  * @brief HPPL event.
@@ -332,4 +332,14 @@ extern bool hl_cuda_event_is_ready(hl_event_t event);
  */
 extern void hl_device_synchronize();

+/**
+ * @brief gpu profiler start
+ */
+extern void hl_profiler_start();
+
+/**
+ * @brief gpu profiler stop
+ */
+extern void hl_profiler_end();
+
 #endif  // HL_CUDA_H_
diff --git a/paddle/cuda/include/stub/hl_cuda_stub.h b/paddle/cuda/include/stub/hl_cuda_stub.h
index 1f91068cdf8b3d472c4b403d1ec7d5293c28c07e..24923a0d4a0cdd49214305c2f7716eeef575c7ee 100644
--- a/paddle/cuda/include/stub/hl_cuda_stub.h
+++ b/paddle/cuda/include/stub/hl_cuda_stub.h
@@ -90,4 +90,8 @@ inline bool hl_cuda_event_is_ready(hl_event_t event) { return true; }

 inline void hl_device_synchronize() {}

+inline void hl_profiler_start() {}
+
+inline void hl_profiler_end() {}
+
 #endif  // HL_CUDA_STUB_H_
diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/cuda/src/hl_cuda_device.cc
index 745be35b56278ed2e0033d5fd2806320d3164d7c..6b71a538485a09cf40a53eddf1ee2f3e2c768b2c 100644
--- a/paddle/cuda/src/hl_cuda_device.cc
+++ b/paddle/cuda/src/hl_cuda_device.cc
@@ -12,15 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include
+#include
 #include
-#include
 #include
+#include
+#include
 #include
 #include "hl_cuda.h"
 #include "hl_cuda.ph"
-#include "hl_thread.ph"
 #include "hl_dso_loader.h"
+#include "hl_thread.ph"
 #include "paddle/utils/Logging.h"

 namespace dynload {
@@ -133,7 +134,9 @@ void *cudart_dso_handle = nullptr;
   __macro(cudaGetLastError) \
   __macro(cudaFuncSetCacheConfig) \
   __macro(cudaRuntimeGetVersion) \
-  __macro(cudaGetErrorString)
+  __macro(cudaGetErrorString) \
+  __macro(cudaProfilerStart) \
+  __macro(cudaProfilerStop)
 // clang-format on

 CUDA_ROUTINE_EACH(DYNAMIC_LOAD_CUDART_WRAP)
@@ -742,3 +745,7 @@ bool hl_cuda_event_is_ready(hl_event_t event) {
   }
   return true;
 }
+
+void hl_profiler_start() { CHECK_CUDA(dynload::cudaProfilerStart()); }
+
+void hl_profiler_end() { CHECK_CUDA(dynload::cudaProfilerStop()); }
diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt
index 5ac7888748ed000ce03c28c0b3bcfb7565a82de5..d593fe0fa3975db105c484a58bbccd3313bf36f1 100644
--- a/paddle/math/tests/CMakeLists.txt
+++ b/paddle/math/tests/CMakeLists.txt
@@ -15,3 +15,4 @@ add_simple_unittest(test_CpuGpuVector)
 add_simple_unittest(test_Allocator)
 add_simple_unittest(test_FPException)
 add_simple_unittest(test_BaseMatrix)
+add_simple_unittest(test_GpuProfiler)
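Taken together, the changes above expose a start/stop pair around the CUDA profiler. A minimal sketch of calling them directly (illustrative; regular callers should go through the `REGISTER_GPU_PROFILER` macro added to `paddle/utils/Stat.h` below):

```cpp
#include "hl_cuda.h"

void runProfiledSection() {
  hl_profiler_start();  // enables CUDA profiler collection (cudaProfilerStart)
  // ... launch the GPU work that should show up in the profile ...
  hl_profiler_end();    // stops collection (cudaProfilerStop)
}
```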
diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c3542b7834224e2fa6fe323a1fbe8ea1e7cd68de
--- /dev/null
+++ b/paddle/math/tests/test_GpuProfiler.cpp
@@ -0,0 +1,137 @@
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifndef PADDLE_ONLY_CPU
+
+#include "paddle/utils/Util.h"
+#include "paddle/math/Matrix.h"
+#include "paddle/math/SparseMatrix.h"
+#include <gtest/gtest.h>
+#include "paddle/gserver/tests/TestUtil.h"
+#include "paddle/utils/Stat.h"
+
+using namespace paddle;  // NOLINT
+using namespace std;     // NOLINT
+
+void MatrixCheckErr(const Matrix& matrix1, const Matrix& matrix2) {
+  CHECK(matrix1.getHeight() == matrix2.getHeight());
+  CHECK(matrix1.getWidth() == matrix2.getWidth());
+#ifndef PADDLE_TYPE_DOUBLE
+  real err = 1e-3;
+#else
+  real err = 1e-10;
+#endif
+
+  int height = matrix1.getHeight();
+  int width = matrix1.getWidth();
+  const real* data1 = matrix1.getData();
+  const real* data2 = matrix2.getData();
+  int count = 0;
+  for (int i = 0; i < height; i++) {
+    for (int j = 0; j < width; j++) {
+      real a = data1[i * width + j];
+      real b = data2[i * width + j];
+      if (fabs(a - b) > err) {
+        if ((fabsf(a - b) / fabsf(a)) > (err / 10.0f)) {
+          count++;
+        }
+      }
+    }
+  }
+  EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
+}
+
+void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW,
+                        int channels) {
+  int inWidth = imgSizeH * imgSizeW * channels;
+  int outWidth = 2 * imgSizeH * 2 * imgSizeW * channels;
+  real ratioH = 0.5;
+  real ratioW = 0.5;
+
+  // forward
+  MatrixPtr input = CpuMatrix::create(numSamples, inWidth, false, false);
+  MatrixPtr inputGpu = GpuMatrix::create(numSamples, inWidth, false, true);
+
+  MatrixPtr target = CpuMatrix::create(numSamples, outWidth, false, false);
+  MatrixPtr targetGpu = GpuMatrix::create(numSamples, outWidth, false, true);
+  MatrixPtr targetCheck = CpuMatrix::create(numSamples, outWidth, false, false);
+
+  input->randomizeUniform();
+  inputGpu->copyFrom(*input);
+
+  {
+    // nvprof: GPU Profiler
+    REGISTER_GPU_PROFILER("testBilinearFwdBwd");
+    target->bilinearForward(*input, imgSizeH, imgSizeW,
+        2 * imgSizeH, 2 * imgSizeW, channels, ratioH, ratioW);
+    targetGpu->bilinearForward(*inputGpu, imgSizeH, imgSizeW,
+        2 * imgSizeH, 2 * imgSizeW, channels, ratioH, ratioW);
+  }
+
+  // check
+  targetCheck->copyFrom(*targetGpu);
+  MatrixCheckErr(*target, *targetCheck);
+
+  // backward
+  MatrixPtr inputGrad = CpuMatrix::create(numSamples, inWidth, false, false);
+  MatrixPtr inputGpuGrad = GpuMatrix::create(numSamples, inWidth, false, true);
+
+  MatrixPtr targetGrad = CpuMatrix::create(numSamples, outWidth, false, false);
+  MatrixPtr targetGpuGrad = GpuMatrix::create(numSamples, outWidth, false,
+                                              true);
+  MatrixPtr targetCheckGrad =
+      CpuMatrix::create(numSamples, inWidth, false, false);
+
+  inputGrad->randomizeUniform();
+  targetGrad->randomizeUniform();
+  inputGpuGrad->copyFrom(*inputGrad);
+  targetGpuGrad->copyFrom(*targetGrad);
+
+  inputGrad->bilinearBackward(*targetGrad, 2 * imgSizeH, 2 * imgSizeW,
+      imgSizeH, imgSizeW, channels, ratioH, ratioW);
+  inputGpuGrad->bilinearBackward(*targetGpuGrad, 2 * imgSizeH, 2 * imgSizeW,
+      imgSizeH, imgSizeW, channels, ratioH, ratioW);
+
+  // check
+  targetCheckGrad->copyFrom(*inputGpuGrad);
+  MatrixCheckErr(*inputGrad, *targetCheckGrad);
+}
+
+TEST(Profiler, testBilinearFwdBwd) {
+  auto numSamples = 10;
+  auto channels = 16;
+  auto imgSize = 64;
+  {
+    // nvprof: GPU Profiler
+    REGISTER_GPU_PROFILER("testBilinearFwdBwd");
+    // Paddle built-in timer
+    REGISTER_TIMER_INFO("testBilinearFwdBwd",
+        "numSamples = 10, channels = 16, imgSizeX = 64, imgSizeY = 64");
+    testBilinearFwdBwd(numSamples, imgSize, imgSize, channels);
+  }
+  globalStat.printAllStatus();
+}
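+
+// The "nvprof" notes above refer to NVIDIA's command-line profiler. Assuming
+// the test binary keeps this name, a typical invocation that records only the
+// bracketed regions is:
+//   nvprof --profile-from-start off ./test_GpuProfiler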
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  initMain(argc, argv);
+
+  // nvprof: GPU Profiler
+  REGISTER_GPU_PROFILER("RecursiveProfilingTest",
+      "numSamples = 10, channels = 16, imgSizeX = 64, imgSizeY = 64");
+
+  return RUN_ALL_TESTS();
+}
+
+#endif /* PADDLE_ONLY_CPU */
diff --git a/paddle/utils/Stat.cpp b/paddle/utils/Stat.cpp
index d7b20ca5eb2f4eadaa6b4acad056d669a9b59c14..ab140c33502ad315d087bb3afc7f39bffc122894 100644
--- a/paddle/utils/Stat.cpp
+++ b/paddle/utils/Stat.cpp
@@ -65,6 +65,7 @@ std::ostream& operator<<(std::ostream& outPut, const Stat& stat) {
   auto showStat = [&](const StatInfo* info, pid_t tid, bool isFirst = true) {
     uint64_t average = 0;
     if (info->count_ > 0) {
+      outPut << std::setfill(' ') << std::left;
       if (!isFirst) {
         outPut << std::setw(42) << " ";
       }
@@ -202,4 +203,22 @@ StatInfo::~StatInfo() {
   }
 }

+static unsigned g_profileCount = 0;
+static std::recursive_mutex g_profileMutex;
+
+GpuProfiler::GpuProfiler(std::string statName, std::string info)
+    : guard_(g_profileMutex) {
+  // only the outermost GpuProfiler in a nest actually starts the profiler
+  if (++g_profileCount == 1) {
+    LOG(INFO) << "Enable GPU Profiler Stat: ["
+              << statName << "] " << info;
+    hl_profiler_start();
+  }
+}
+
+GpuProfiler::~GpuProfiler() {
+  // the matching outermost destructor stops it again
+  if (--g_profileCount == 0) {
+    hl_profiler_end();
+  }
+}
+
 }  // namespace paddle
diff --git a/paddle/utils/Stat.h b/paddle/utils/Stat.h
index 4051145d9246639fce5d041103c1211a939eddca..1ef688ea8da53ee0cd51b1775e671f2b10be782b 100644
--- a/paddle/utils/Stat.h
+++ b/paddle/utils/Stat.h
@@ -15,19 +15,19 @@ limitations under the License. */
 #pragma once

 #include
-#include
 #include
-#include
 #include
+#include
+#include
 #include
+#include
 #include
-#include
-#include "Logging.h"
 #include "BarrierStat.h"
 #include "Locks.h"
+#include "Logging.h"
 #include "ThreadLocal.h"
-#include "BarrierStat.h"
+#include "hl_gpu.h"

 namespace paddle {
@@ -283,4 +283,24 @@ inline StatSet& registerTimerArg2(uint64_t threshold = -1,

 #endif  // DISABLE_TIMER

+class GpuProfiler final {
+public:
+  GpuProfiler(std::string statName, std::string info);
+  ~GpuProfiler();
+
+private:
+  std::lock_guard<std::recursive_mutex> guard_;
+};
+
+#ifdef PADDLE_DISABLE_PROFILER
+
+#define REGISTER_GPU_PROFILER(statName, ...)
+
+#else
+
+#define REGISTER_GPU_PROFILER(statName, ...) \
+  GpuProfiler __gpuProfiler(statName, #__VA_ARGS__);
+
+#endif  // PADDLE_DISABLE_PROFILER
+
 }  // namespace paddle
diff --git a/python/paddle/trainer_config_helpers/activations.py b/python/paddle/trainer_config_helpers/activations.py
index 6261934e1bc8e8df62aeaa0757f4a237f91ef748..eeed18a98a27313dac65a695960043d0543bb577 100644
--- a/python/paddle/trainer_config_helpers/activations.py
+++ b/python/paddle/trainer_config_helpers/activations.py
@@ -16,7 +16,8 @@ __all__ = [
     "TanhActivation", "SigmoidActivation", "SoftmaxActivation",
     "IdentityActivation", "LinearActivation", 'SequenceSoftmaxActivation',
     'ExpActivation', "ReluActivation", "BReluActivation", "SoftReluActivation",
-    "STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation"
+    "STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation",
+    "LogActivation"
 ]
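With `LogActivation` exported, it can be used like any other activation in a layer configuration; a minimal sketch (the layer names and sizes are illustrative):

```python
from paddle.trainer_config_helpers import *

data = data_layer(name="input", size=128)
# apply the newly exported log activation to a fully connected layer
hidden = fc_layer(input=data, size=64, act=LogActivation())
```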