diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5f1e62bc865d9aef15ace98434fe41efc41e49a7..40c5fec3519e79aeed7e47187e62b149a2da03ca 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,10 +1,10 @@ -- repo: https://github.com/reyoung/mirrors-yapf.git - sha: v0.13.2 +- repo: https://github.com/pre-commit/mirrors-yapf.git + sha: v0.16.0 hooks: - - id: yapf - files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$ # Bazel BUILD files follow Python syntax. + - id: yapf + files: \.py$ - repo: https://github.com/pre-commit/pre-commit-hooks - sha: v0.7.1 + sha: a11d9314b22d8f8c7556443875b731ef05965464 hooks: - id: check-merge-conflict - id: check-symlinks @@ -24,11 +24,11 @@ files: \.md$ - id: remove-tabs files: \.md$ -- repo: local - hooks: - - id: convert-markdown-into-html - name: convert-markdown-into-html - description: "Convert README.md into index.html and README.en.md into index.en.html" - entry: python pre-commit-hooks/convert_markdown_into_html.py - language: system - files: \.md$ +- repo: local + hooks: + - id: convert-markdown-into-html + name: convert-markdown-into-html + description: Convert README.md into index.html and README.en.md into index.en.html + entry: python pre-commit-hooks/convert_markdown_into_html.py + language: system + files: \.md$ diff --git a/README.en.md b/README.en.md new file mode 100644 index 0000000000000000000000000000000000000000..6e235bd13e8c6c772d0e166506d68572288d3772 --- /dev/null +++ b/README.en.md @@ -0,0 +1,12 @@ +# Deep Learning with PaddlePaddle + +1. [Fit a Line](http://book.paddlepaddle.org/fit_a_line/index.en.html) +1. [Recognize Digits](http://book.paddlepaddle.org/recognize_digits/index.en.html) +1. [Image Classification](http://book.paddlepaddle.org/image_classification/index.en.html) +1. [Word to Vector](http://book.paddlepaddle.org/word2vec/index.en.html) +1. [Understand Sentiment](http://book.paddlepaddle.org/understand_sentiment/index.en.html) +1. [Label Semantic Roles](http://book.paddlepaddle.org/label_semantic_roles/index.en.html) +1. [Machine Translation](http://book.paddlepaddle.org/machine_translation/index.en.html) +1. [Recommender System](http://book.paddlepaddle.org/recommender_system/index.en.html) + +This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. diff --git a/README.md b/README.md index 459bfbaefd59deb97b87e3dba80e26e0157c85ce..2aae8fa2f2a1db95b566665c99b20846cbcbba07 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,13 @@ # 深度学习入门 -1. [新手入门](fit_a_line/) [[html](http://book.paddlepaddle.org/fit_a_line)] -1. [识别数字](recognize_digits/) [[html](http://book.paddlepaddle.org/recognize_digits)] -1. [图像分类](image_classification/) [[html](http://book.paddlepaddle.org/image_classification)] -1. [词向量](word2vec/) [[html](http://book.paddlepaddle.org/word2vec)] -1. [情感分析](understand_sentiment/) [[html](http://book.paddlepaddle.org/understand_sentiment)] -1. [语义角色标注](label_semantic_roles/) [[html](http://book.paddlepaddle.org/label_semantic_roles)] -1. [机器翻译](machine_translation/) [[html](http://book.paddlepaddle.org/machine_translation)] -1. [个性化推荐](recommender_system/) [[html](http://book.paddlepaddle.org/recommender_system)] +1. [新手入门](http://book.paddlepaddle.org/fit_a_line) +1. [识别数字](http://book.paddlepaddle.org/recognize_digits) +1. [图像分类](http://book.paddlepaddle.org/image_classification) +1. [词向量](http://book.paddlepaddle.org/word2vec) +1. [情感分析](http://book.paddlepaddle.org/understand_sentiment) +1. 
[语义角色标注](http://book.paddlepaddle.org/label_semantic_roles) +1. [机器翻译](http://book.paddlepaddle.org/machine_translation) +1. [个性化推荐](http://book.paddlepaddle.org/recommender_system) - -# Deep Learning Introduction - -1. [Fit a Line](fit_a_line/) [[html](http://book.paddlepaddle.org/fit_a_line/index.en.html)] -1. [Recognize Digits](recognize_digits/) [[html](http://book.paddlepaddle.org/recognize_digits/index.en.html)] -1. [Image Classification](image_classification/) [[html](http://book.paddlepaddle.org/image_classification/index.en.html)] -1. [Word to Vector](word2vec/) [[html](http://book.paddlepaddle.org/word2vec/index.en.html)] -1. [Understand Sentiment](understand_sentiment/) [[html](http://book.paddlepaddle.org/understand_sentiment/index.en.html)] -1. [Label Semantic Roles](label_semantic_roles/) [[html](http://book.paddlepaddle.org/label_semantic_roles/index.en.html)] -1. [Machine Translation](machine_translation/) [[html](http://book.paddlepaddle.org/machine_translation/index.en.html)] -1. [Recommender System](recommender_system/) [[html](http://book.paddlepaddle.org/recommender_system/index.en.html)] - -
知识共享许可协议
本教程 由 PaddlePaddle 创作，采用 知识共享 署名-非商业性使用-相同方式共享 4.0 国际 许可协议进行许可。 -This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. diff --git a/fit_a_line/train.py b/fit_a_line/train.py index df665c436872bbaaf5c08790cba69c8ac17e5db7..6fae9e012e6153c6fc84a30ea72d82f2d9a80200 100644 --- a/fit_a_line/train.py +++ b/fit_a_line/train.py @@ -18,9 +18,8 @@ def main(): # create optimizer optimizer = paddle.optimizer.Momentum(momentum=0) - trainer = paddle.trainer.SGD(cost=cost, - parameters=parameters, - update_equation=optimizer) + trainer = paddle.trainer.SGD( + cost=cost, parameters=parameters, update_equation=optimizer) feeding = {'x': 0, 'y': 1} @@ -33,16 +32,14 @@ def main(): if isinstance(event, paddle.event.EndPass): result = trainer.test( - reader=paddle.batch( - uci_housing.test(), batch_size=2), + reader=paddle.batch(uci_housing.test(), batch_size=2), feeding=feeding) print "Test %d, Cost %f" % (event.pass_id, result.cost) # training trainer.train( reader=paddle.batch( - paddle.reader.shuffle( - uci_housing.train(), buf_size=500), + paddle.reader.shuffle(uci_housing.train(), buf_size=500), batch_size=2), feeding=feeding, event_handler=event_handler, diff --git a/image_classification/deprecated/classify.py b/image_classification/deprecated/classify.py index 5a49bc22b0b205f7212c52c482f26720fea4e684..5b6e8a48d4dc72d7e3c69b6b2f46b3d41caee012 100644 --- a/image_classification/deprecated/classify.py +++ b/image_classification/deprecated/classify.py @@ -44,8 +44,9 @@ def vis_square(data, fname): (0, 1)) # add some space between filters + ((0, 0), ) * (data.ndim - 3)) # don't pad the last dimension (if there is one) - data = np.pad(data, padding, mode='constant', - constant_values=1) # pad with ones (white) + data = np.pad( + data, padding, mode='constant', + constant_values=1) # pad with ones (white) # tile the filters into an image data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple( range(4, data.ndim + 1))) diff --git a/image_classification/train.py b/image_classification/train.py index 743b10a50a389b9024a1236d97e1b31157e24896..f8e18452cc08f174ef3c63026c880759734e42f4 100644 --- a/image_classification/train.py +++ b/image_classification/train.py @@ -36,9 +36,8 @@ def main(): # option 2. vgg net = vgg_bn_drop(image) - out = paddle.layer.fc(input=net, - size=classdim, - act=paddle.activation.Softmax()) + out = paddle.layer.fc( + input=net, size=classdim, act=paddle.activation.Softmax()) lbl = paddle.layer.data( name="label", type=paddle.data_type.integer_value(classdim)) @@ -75,9 +74,8 @@ def main(): print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) # Create trainer - trainer = paddle.trainer.SGD(cost=cost, - parameters=parameters, - update_equation=momentum_optimizer) + trainer = paddle.trainer.SGD( + cost=cost, parameters=parameters, update_equation=momentum_optimizer) trainer.train( reader=paddle.batch( paddle.reader.shuffle( diff --git a/index.en.html b/index.en.html new file mode 100644 index 0000000000000000000000000000000000000000..9c9b2c810fe505b0d62634588c91042c6611a0a2 --- /dev/null +++ b/index.en.html @@ -0,0 +1,76 @@
+ + + + + + + diff --git a/index.html b/index.html index 7f2857bddc690e7a6969c952dbe7750567b9a16e..ff90f5da17cb6b0f20f3bf4372c034d5d164cd4b 100644 --- a/index.html +++ b/index.html @@ -42,31 +42,17 @@ diff --git a/label_semantic_roles/db_lstm.py b/label_semantic_roles/db_lstm.py index 6baaf254b6fdfd3fc273240b017e3ae5cb08a855..ea8c2d117ae904286b201ecc5d55cdfa78198754 100755 --- a/label_semantic_roles/db_lstm.py +++ b/label_semantic_roles/db_lstm.py @@ -75,8 +75,7 @@ settings( learning_method=MomentumOptimizer(momentum=0), learning_rate=2e-2, regularization=L2Regularization(8e-4), - model_average=ModelAverage( - average_window=0.5, max_average_window=10000), ) + model_average=ModelAverage(average_window=0.5, max_average_window=10000), ) ####################################### network ############################## #8 features and 1 target @@ -102,13 +101,12 @@ std_default = ParameterAttribute(initial_std=default_std) predicate_embedding = embedding_layer( size=word_dim, input=predicate, - param_attr=ParameterAttribute( - name='vemb', initial_std=default_std)) + param_attr=ParameterAttribute(name='vemb', initial_std=default_std)) word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] emb_layers = [ - embedding_layer( - size=word_dim, input=x, param_attr=emb_para) for x in word_input + embedding_layer(size=word_dim, input=x, param_attr=emb_para) + for x in word_input ] emb_layers.append(predicate_embedding) mark_embedding = embedding_layer( @@ -120,8 +118,8 @@ hidden_0 = mixed_layer( size=hidden_dim, bias_attr=std_default, input=[ - full_matrix_projection( - input=emb, param_attr=std_default) for emb in emb_layers + full_matrix_projection(input=emb, param_attr=std_default) + for emb in emb_layers ]) mix_hidden_lr = 1e-3 @@ -171,10 +169,8 @@ feature_out = mixed_layer( size=label_dict_len, bias_attr=std_default, input=[ - full_matrix_projection( - input=input_tmp[0], param_attr=hidden_para_attr), - full_matrix_projection( - input=input_tmp[1], param_attr=lstm_para_attr) + full_matrix_projection(input=input_tmp[0], param_attr=hidden_para_attr), + full_matrix_projection(input=input_tmp[1], param_attr=lstm_para_attr) ], ) if not is_predict: diff --git a/label_semantic_roles/train.py b/label_semantic_roles/train.py index 623b614c7874e5f94247b7a94f4573c2a7bf5e77..11b3709c6e9c76e84798c076cb9e1c7d30a98efc 100644 --- a/label_semantic_roles/train.py +++ b/label_semantic_roles/train.py @@ -40,15 +40,14 @@ def db_lstm(): predicate_embedding = paddle.layer.embedding( size=word_dim, input=predicate, - param_attr=paddle.attr.Param( - name='vemb', initial_std=default_std)) + param_attr=paddle.attr.Param(name='vemb', initial_std=default_std)) mark_embedding = paddle.layer.embedding( size=mark_dim, input=mark, param_attr=std_0) word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] emb_layers = [ - paddle.layer.embedding( - size=word_dim, input=x, param_attr=emb_para) for x in word_input + paddle.layer.embedding(size=word_dim, input=x, param_attr=emb_para) + for x in word_input ] emb_layers.append(predicate_embedding) emb_layers.append(mark_embedding) @@ -109,13 +108,12 @@ def db_lstm(): input=input_tmp[1], param_attr=lstm_para_attr) ], ) - crf_cost = paddle.layer.crf(size=label_dict_len, - input=feature_out, - label=target, - param_attr=paddle.attr.Param( - name='crfw', - initial_std=default_std, - learning_rate=mix_hidden_lr)) + crf_cost = paddle.layer.crf( + size=label_dict_len, + input=feature_out, + label=target, + param_attr=paddle.attr.Param( + name='crfw', initial_std=default_std, 
learning_rate=mix_hidden_lr)) crf_dec = paddle.layer.crf_decoding( name='crf_dec_l', @@ -151,13 +149,11 @@ def main(): model_average=paddle.optimizer.ModelAverage( average_window=0.5, max_average_window=10000), ) - trainer = paddle.trainer.SGD(cost=crf_cost, - parameters=parameters, - update_equation=optimizer) + trainer = paddle.trainer.SGD( + cost=crf_cost, parameters=parameters, update_equation=optimizer) reader = paddle.batch( - paddle.reader.shuffle( - conll05.test(), buf_size=8192), batch_size=10) + paddle.reader.shuffle(conll05.test(), buf_size=8192), batch_size=10) feeding = { 'word_data': 0, diff --git a/machine_translation/api_train.py b/machine_translation/api_train.py index 6efd254e7a48703a69c9f09dd35d41ba7ac5689a..c8d143260b412b0fac78bf387f8b5ebbbef6241d 100644 --- a/machine_translation/api_train.py +++ b/machine_translation/api_train.py @@ -105,9 +105,8 @@ def main(): # define optimize method and trainer optimizer = paddle.optimizer.Adam(learning_rate=1e-4) - trainer = paddle.trainer.SGD(cost=cost, - parameters=parameters, - update_equation=optimizer) + trainer = paddle.trainer.SGD( + cost=cost, parameters=parameters, update_equation=optimizer) # define data reader feeding = { diff --git a/machine_translation/seqToseq_net.py b/machine_translation/seqToseq_net.py index 750d35c0c6b62801d70802ac4dc97f89d09fc612..fce466870f9c46525d460efa897e22e795862caa 100644 --- a/machine_translation/seqToseq_net.py +++ b/machine_translation/seqToseq_net.py @@ -110,8 +110,7 @@ group_inputs = [group_input1, group_input2] if not is_generating: trg_embedding = embedding_layer( - input=data_layer( - name='target_language_word', size=target_dict_dim), + input=data_layer(name='target_language_word', size=target_dict_dim), size=word_vector_dim, param_attr=ParamAttr(name='_target_language_embedding')) group_inputs.append(trg_embedding) @@ -156,8 +155,7 @@ else: seqtext_printer_evaluator( input=beam_gen, - id_input=data_layer( - name="sent_id", size=1), + id_input=data_layer(name="sent_id", size=1), dict_file=trg_lang_dict, result_file=gen_trans_file) outputs(beam_gen) diff --git a/recognize_digits/train.py b/recognize_digits/train.py index 0ef0ebcac9a24c82f588975b6445c87cbf108e68..b90e93a6d578fbb47e6bae8dfb0ea77ea4cee9eb 100644 --- a/recognize_digits/train.py +++ b/recognize_digits/train.py @@ -2,9 +2,8 @@ import paddle.v2 as paddle def softmax_regression(img): - predict = paddle.layer.fc(input=img, - size=10, - act=paddle.activation.Softmax()) + predict = paddle.layer.fc( + input=img, size=10, act=paddle.activation.Softmax()) return predict @@ -12,14 +11,12 @@ def multilayer_perceptron(img): # The first fully-connected layer hidden1 = paddle.layer.fc(input=img, size=128, act=paddle.activation.Relu()) # The second fully-connected layer and the according activation function - hidden2 = paddle.layer.fc(input=hidden1, - size=64, - act=paddle.activation.Relu()) + hidden2 = paddle.layer.fc( + input=hidden1, size=64, act=paddle.activation.Relu()) # The thrid fully-connected layer, note that the hidden size should be 10, # which is the number of unique digits - predict = paddle.layer.fc(input=hidden2, - size=10, - act=paddle.activation.Softmax()) + predict = paddle.layer.fc( + input=hidden2, size=10, act=paddle.activation.Softmax()) return predict @@ -43,14 +40,12 @@ def convolutional_neural_network(img): pool_stride=2, act=paddle.activation.Tanh()) # The first fully-connected layer - fc1 = paddle.layer.fc(input=conv_pool_2, - size=128, - act=paddle.activation.Tanh()) + fc1 = paddle.layer.fc( + 
input=conv_pool_2, size=128, act=paddle.activation.Tanh()) # The softmax layer, note that the hidden size should be 10, # which is the number of unique digits - predict = paddle.layer.fc(input=fc1, - size=10, - act=paddle.activation.Softmax()) + predict = paddle.layer.fc( + input=fc1, size=10, act=paddle.activation.Softmax()) return predict @@ -76,9 +71,8 @@ optimizer = paddle.optimizer.Momentum( momentum=0.9, regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128)) -trainer = paddle.trainer.SGD(cost=cost, - parameters=parameters, - update_equation=optimizer) +trainer = paddle.trainer.SGD( + cost=cost, parameters=parameters, update_equation=optimizer) lists = [] @@ -99,8 +93,7 @@ def event_handler(event): trainer.train( reader=paddle.batch( - paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=8192), + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=8192), batch_size=128), event_handler=event_handler, num_passes=100) diff --git a/recommender_system/data/meta_generator.py b/recommender_system/data/meta_generator.py index 38e4679d266c331a751114cd13f0e3453016cf26..b15d8ec31c4b2696ac6e8bb36a57c068f0573d81 100644 --- a/recommender_system/data/meta_generator.py +++ b/recommender_system/data/meta_generator.py @@ -208,8 +208,8 @@ class EmbeddingFieldParser(object): elif config['dict']['type'] == 'split': self.dict = SplitEmbeddingDict(config['dict'].get('delimiter', ',')) elif config['dict']['type'] == 'whole_content': - self.dict = EmbeddingFieldParser.WholeContentDict(config['dict'][ - 'sort']) + self.dict = EmbeddingFieldParser.WholeContentDict( + config['dict']['sort']) else: print config assert False diff --git a/understand_sentiment/train.py b/understand_sentiment/train.py index 1c856556bd0cb32f60eba322469b3621c37e1349..7878f00b6401ed0e6a0863d2cec129b6e51b163d 100644 --- a/understand_sentiment/train.py +++ b/understand_sentiment/train.py @@ -24,9 +24,8 @@ def convolution_net(input_dim, class_dim=2, emb_dim=128, hid_dim=128): input=emb, context_len=3, hidden_size=hid_dim) conv_4 = paddle.networks.sequence_conv_pool( input=emb, context_len=4, hidden_size=hid_dim) - output = paddle.layer.fc(input=[conv_3, conv_4], - size=class_dim, - act=paddle.activation.Softmax()) + output = paddle.layer.fc( + input=[conv_3, conv_4], size=class_dim, act=paddle.activation.Softmax()) lbl = paddle.layer.data("label", paddle.data_type.integer_value(2)) cost = paddle.layer.classification_cost(input=output, label=lbl) return cost @@ -64,20 +63,19 @@ def stacked_lstm_net(input_dim, paddle.data_type.integer_value_sequence(input_dim)) emb = paddle.layer.embedding(input=data, size=emb_dim) - fc1 = paddle.layer.fc(input=emb, - size=hid_dim, - act=linear, - bias_attr=bias_attr) + fc1 = paddle.layer.fc( + input=emb, size=hid_dim, act=linear, bias_attr=bias_attr) lstm1 = paddle.layer.lstmemory( input=fc1, act=relu, bias_attr=bias_attr, layer_attr=layer_attr) inputs = [fc1, lstm1] for i in range(2, stacked_num + 1): - fc = paddle.layer.fc(input=inputs, - size=hid_dim, - act=linear, - param_attr=para_attr, - bias_attr=bias_attr) + fc = paddle.layer.fc( + input=inputs, + size=hid_dim, + act=linear, + param_attr=para_attr, + bias_attr=bias_attr) lstm = paddle.layer.lstmemory( input=fc, reverse=(i % 2) == 0, @@ -90,11 +88,12 @@ def stacked_lstm_net(input_dim, input=inputs[0], pooling_type=paddle.pooling.Max()) lstm_last = paddle.layer.pooling( input=inputs[1], pooling_type=paddle.pooling.Max()) - output = paddle.layer.fc(input=[fc_last, lstm_last], - size=class_dim, - 
act=paddle.activation.Softmax(), - bias_attr=bias_attr, - param_attr=para_attr) + output = paddle.layer.fc( + input=[fc_last, lstm_last], + size=class_dim, + act=paddle.activation.Softmax(), + bias_attr=bias_attr, + param_attr=para_attr) lbl = paddle.layer.data("label", paddle.data_type.integer_value(2)) cost = paddle.layer.classification_cost(input=output, label=lbl) @@ -148,9 +147,8 @@ if __name__ == '__main__': print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) # create trainer - trainer = paddle.trainer.SGD(cost=cost, - parameters=parameters, - update_equation=adam_optimizer) + trainer = paddle.trainer.SGD( + cost=cost, parameters=parameters, update_equation=adam_optimizer) trainer.train( reader=train_reader, diff --git a/word2vec/train.py b/word2vec/train.py index 15ad6a01cc2230ad1c8a6a44c1d3d828331a0d1d..3600025863cd91e9b2e2c1c0ffb19af9fc28070d 100644 --- a/word2vec/train.py +++ b/word2vec/train.py @@ -40,18 +40,19 @@ def main(): Efourth = wordemb(fourthword) contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth]) - hidden1 = paddle.layer.fc(input=contextemb, - size=hiddensize, - act=paddle.activation.Sigmoid(), - layer_attr=paddle.attr.Extra(drop_rate=0.5), - bias_attr=paddle.attr.Param(learning_rate=2), - param_attr=paddle.attr.Param( - initial_std=1. / math.sqrt(embsize * 8), - learning_rate=1)) - predictword = paddle.layer.fc(input=hidden1, - size=dict_size, - bias_attr=paddle.attr.Param(learning_rate=2), - act=paddle.activation.Softmax()) + hidden1 = paddle.layer.fc( + input=contextemb, + size=hiddensize, + act=paddle.activation.Sigmoid(), + layer_attr=paddle.attr.Extra(drop_rate=0.5), + bias_attr=paddle.attr.Param(learning_rate=2), + param_attr=paddle.attr.Param( + initial_std=1. / math.sqrt(embsize * 8), learning_rate=1)) + predictword = paddle.layer.fc( + input=hidden1, + size=dict_size, + bias_attr=paddle.attr.Param(learning_rate=2), + act=paddle.activation.Softmax()) def event_handler(event): if isinstance(event, paddle.event.EndIteration):
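
Note on the recurring refactor: nearly every hunk above is the same yapf v0.16.0 style change — keyword arguments pulled onto a continuation line after an opening parenthesis — applied to `paddle.trainer.SGD`, `paddle.layer.fc`, and the `paddle.batch(paddle.reader.shuffle(...))` reader pipelines. For reference, below is a minimal sketch of how the fit_a_line training wiring reads after this change. It is stitched together from the hunks only; the `uci_housing` import path and the `num_passes` value are assumptions and are not part of this diff, and `cost`/`parameters` come from the network definition earlier in the file, which these hunks do not touch.

```python
# Sketch only: reflects the post-yapf layout shown in the fit_a_line/train.py
# hunks above. Anything marked "assumed" is not visible in this diff.
import paddle.v2 as paddle
import paddle.v2.dataset.uci_housing as uci_housing  # assumed import path


def train(cost, parameters):
    # `cost` and `parameters` are produced by the network definition earlier
    # in the file (untouched by this diff).

    # create optimizer
    optimizer = paddle.optimizer.Momentum(momentum=0)
    trainer = paddle.trainer.SGD(
        cost=cost, parameters=parameters, update_equation=optimizer)

    # map data-layer names to column indices of the reader tuples
    feeding = {'x': 0, 'y': 1}

    def event_handler(event):
        # evaluate on the held-out set at the end of every pass
        if isinstance(event, paddle.event.EndPass):
            result = trainer.test(
                reader=paddle.batch(uci_housing.test(), batch_size=2),
                feeding=feeding)
            print "Test %d, Cost %f" % (event.pass_id, result.cost)

    # training: shuffled reader wrapped into mini-batches of 2
    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(uci_housing.train(), buf_size=500),
            batch_size=2),
        feeding=feeding,
        event_handler=event_handler,
        num_passes=30)  # assumed; the pass count lies outside the hunk
```

The same `paddle.trainer.SGD(...)` / `trainer.train(reader=paddle.batch(...))` shape recurs in the recognize_digits, image_classification, understand_sentiment, and machine_translation hunks above; only the cost layer, readers, and optimizer settings differ.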