From c02f773a537b1693a3fad941980d052aa36cc766 Mon Sep 17 00:00:00 2001
From: Yancey
Date: Tue, 27 Feb 2018 14:47:47 +0800
Subject: [PATCH] Fix dist demo var type error (#8600)

* Fix dist demo error

* revert trainer_id
---
 python/paddle/fluid/framework.py                             | 1 +
 .../fluid/tests/book_distribute/notest_dist_fit_a_line.py    | 3 +--
 .../book_distribute/notest_dist_image_classification.py      | 1 +
 .../book_distribute/notest_dist_label_semantic_roles.py      | 1 +
 .../fluid/tests/book_distribute/notest_dist_word2vec.py      | 4 +++-
 .../tests/book_distribute/notest_machine_translation.py      | 1 +
 .../book_distribute/notest_recognize_digits_conv_dist.py     | 6 +-----
 .../tests/book_distribute/notest_recommender_system_dist.py  | 1 +
 .../notest_understand_sentiment_conv_dist.py                 | 1 +
 .../notest_understand_sentiment_dynamic_lstm.py              | 1 +
 10 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 64441e8fa4..2e23ddc9be 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -784,6 +784,7 @@ class Block(object):
         elif type(v) == Variable:
             var = Variable(
                 self,
+                type=v.type,
                 name=new_name,
                 error_clip=error_clip,
                 stop_gradient=stop_gradient)
diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py
index 01c1fa24fd..cff82a8948 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py
@@ -48,6 +48,7 @@ current_endpoint = os.getenv("SERVER_ENDPOINT")
 # run as trainer or parameter server
 training_role = os.getenv("TRAINING_ROLE",
                           "TRAINER")  # get the training role: trainer/pserver
+
 t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
 
 if training_role == "PSERVER":
@@ -65,8 +66,6 @@ else:
 
     PASS_NUM = 100
     for pass_id in range(PASS_NUM):
-        fluid.io.save_persistables(exe, "./fit_a_line.model/")
-        fluid.io.load_persistables(exe, "./fit_a_line.model/")
         for data in train_reader():
             avg_loss_value = exe.run(trainer_prog,
                                      feed=feeder.feed(data),
diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py
index e9101fd763..46630db43e 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py
@@ -138,6 +138,7 @@ current_endpoint = os.getenv("SERVER_ENDPOINT")
 # run as trainer or parameter server
 training_role = os.getenv("TRAINING_ROLE",
                           "TRAINER")  # get the training role: trainer/pserver
+
 t.transpile(
     optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS)
 
diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py
index 2d0c54fa7c..3ec85517ab 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py
@@ -191,6 +191,7 @@ def main():
     # run as trainer or parameter server
     training_role = os.getenv(
         "TRAINING_ROLE", "TRAINER")  # get the training role: trainer/pserver
+
     t.transpile(
         optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
 
diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py
index 6304927364..8164ba5428 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py
@@ -82,6 +82,7 @@ current_endpoint = os.getenv("SERVER_ENDPOINT")
 # run as trainer or parameter server
 training_role = os.getenv("TRAINING_ROLE",
                           "TRAINER")  # get the training role: trainer/pserver
+
 t.transpile(
     optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS)
 if training_role == "PSERVER":
@@ -97,9 +98,10 @@ elif training_role == "TRAINER":
         feed_list=[first_word, second_word, third_word, forth_word, next_word],
         place=place)
     exe.run(fluid.default_startup_program())
+    trainer_prog = t.get_trainer_program()
     for pass_id in range(PASS_NUM):
         for data in train_reader():
-            avg_cost_np = exe.run(t.get_trainer_program(),
+            avg_cost_np = exe.run(trainer_prog,
                                   feed=feeder.feed(data),
                                   fetch_list=[avg_cost])
             print("avg_cost_np", avg_cost_np)
diff --git a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py
index f5ef08430e..fee8db2497 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py
@@ -115,6 +115,7 @@ def main():
     # run as trainer or parameter server
     training_role = os.getenv(
         "TRAINING_ROLE", "TRAINER")  # get the training role: trainer/pserver
+
     t.transpile(
         optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
 
diff --git a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py
index eae1fe62af..b6ad6a992d 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py
@@ -64,11 +64,7 @@ if not current_endpoint:
 
 t = fluid.DistributeTranspiler()
 t.transpile(
-    optimize_ops,
-    params_grads,
-    0,
-    pservers=pserver_endpoints,
-    trainers=trainers)
+    optimize_ops, params_grads, pservers=pserver_endpoints, trainers=trainers)
 
 if training_role == "PSERVER":
     pserver_prog = t.get_pserver_program(current_endpoint)
diff --git a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py
index 4329c821c2..741ec33639 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py
@@ -171,6 +171,7 @@ def main():
     current_endpoint = os.getenv("SERVER_ENDPOINT")
     # run as trainer or parameter server
     training_role = os.getenv("TRAINING_ROLE", "TRAINER")
+
     t.transpile(
         optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
 
diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py
index ee0d8597b7..0467184bbf 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py
@@ -90,6 +90,7 @@ def main():
     # run as trainer or parameter server
     training_role = os.getenv(
         "TRAINING_ROLE", "TRAINER")  # get the training role: trainer/pserver
+
     t.transpile(
         optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
 
diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py
index fa792cbf92..1e13385852 100644
--- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py
+++ b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py
@@ -102,6 +102,7 @@ def main():
     # run as trainer or parameter server
     training_role = os.getenv(
         "TRAINING_ROLE", "TRAINER")  # get the training role: trainer/pserver
+
     t.transpile(
         optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
 
-- 
GitLab
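
Note (not part of the patch itself): the framework.py hunk is the actual "var type" fix. When Block re-creates a Variable under a new name it now forwards type=v.type, so the renamed variable keeps the original variable's type instead of the constructor's default. The demo hunks all touch the same trainer/pserver dispatch: t.transpile(...) is called without the reverted trainer_id positional argument, and the trainer program is fetched once rather than on every iteration. Below is a minimal sketch of that dispatch pattern as it looks after the patch. It only uses calls that appear in the hunks plus the usual fluid Executor setup; PASS_NUM aside, names such as train_reader, feeder, avg_cost, optimize_ops and params_grads stand in for objects each demo builds earlier, so the snippet is schematic rather than runnable on its own.

import os

import paddle.fluid as fluid

# Schematic: each demo builds a model that yields avg_cost, and
# optimizer.minimize(avg_cost) returns optimize_ops and params_grads.
# train_reader and feeder are likewise constructed by the demo itself.

pserver_endpoints = os.getenv("PSERVERS")        # parameter server endpoint list
current_endpoint = os.getenv("SERVER_ENDPOINT")  # this node's pserver endpoint
training_role = os.getenv("TRAINING_ROLE", "TRAINER")

place = fluid.CPUPlace()
exe = fluid.Executor(place)

t = fluid.DistributeTranspiler()
# trainer_id was reverted in this patch, so no positional id is passed here.
t.transpile(
    optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)

if training_role == "PSERVER":
    # Parameter-server role: build and serve the pserver-side program.
    # (The demos differ in how they initialize pserver parameters before this.)
    pserver_prog = t.get_pserver_program(current_endpoint)
    exe.run(pserver_prog)
elif training_role == "TRAINER":
    # Trainer role: initialize parameters, then fetch the trainer program
    # once, outside the training loop (as the word2vec hunk now does).
    exe.run(fluid.default_startup_program())
    trainer_prog = t.get_trainer_program()
    PASS_NUM = 100
    for pass_id in range(PASS_NUM):
        for data in train_reader():
            avg_cost_np = exe.run(trainer_prog,
                                  feed=feeder.feed(data),
                                  fetch_list=[avg_cost])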