From 1fe89e8afaab942a5708654d638a5293803b0877 Mon Sep 17 00:00:00 2001
From: guru4elephant <35550832+guru4elephant@users.noreply.github.com>
Date: Thu, 29 Nov 2018 23:34:28 +0800
Subject: [PATCH] Update async_executor.md

---
 doc/fluid/design/async_executor/async_executor.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/doc/fluid/design/async_executor/async_executor.md b/doc/fluid/design/async_executor/async_executor.md
index dc75e6072..216c8d967 100644
--- a/doc/fluid/design/async_executor/async_executor.md
+++ b/doc/fluid/design/async_executor/async_executor.md
@@ -9,7 +9,7 @@ def train_loop():
     with tarfile.open(paddle.dataset.common.download(URL, "imdb", MD5)) as tarf:
         tarf.extractall(path='./')
         tarf.close()
-    # Initialize dataset description 
+    # Initialize dataset description
     dataset = fluid.DataFeedDesc('train_data/data.prototxt')
     dataset.set_batch_size(128)  # See API doc for how to change other fields
     print dataset.desc()  # Debug purpose: see what we get
@@ -18,7 +18,7 @@ def train_loop():
         name="words", shape=[1], dtype="int64", lod_level=1)
     # label data
     label = fluid.layers.data(name="label", shape=[1], dtype="int64")
-    avg_cost, acc, prediction = bow_net(data, label) 
+    avg_cost, acc, prediction = bow_net(data, label)
     sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=0.002)
     opt_ops, weight_and_grad = sgd_optimizer.minimize(avg_cost)
     # Run startup program
@@ -61,7 +61,7 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
                                 const std::vector<std::string>& fetch_var_names,
                                 const bool debug) {
   std::vector<std::thread> threads;
-  auto& block = main_program.Block(0); 
+  auto& block = main_program.Block(0);
   for (auto var_name : fetch_var_names) {
     auto var_desc = block.FindVar(var_name);
     auto shapes = var_desc->GetShape();
@@ -83,7 +83,7 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
   }
   std::vector<std::shared_ptr<DataFeed>> readers;
   PrepareReaders(readers, actual_thread_num, data_feed_desc, filelist);
-  std::vector<std::shared_ptr<ExecutorThreadWorker>> workers; 
+  std::vector<std::shared_ptr<ExecutorThreadWorker>> workers;
   workers.resize(actual_thread_num);
   for (auto& worker : workers) {
     worker.reset(new ExecutorThreadWorker);
--
GitLab