diff --git a/doc/design/distributed_lookup_table_design.md b/doc/fluid/design/dist_train/distributed_lookup_table_design.md
similarity index 97%
rename from doc/design/distributed_lookup_table_design.md
rename to doc/fluid/design/dist_train/distributed_lookup_table_design.md
index a09f2818c888397b07fc7d09ecd20056f4176982..e543adf0f97cc6b47415b807d7a1ed1effec9b22 100644
--- a/doc/design/distributed_lookup_table_design.md
+++ b/doc/fluid/design/dist_train/distributed_lookup_table_design.md
@@ -26,7 +26,7 @@ lookup of rows.

 The following figure illustrates the multiplication of x with two
 non-zero elements, or say, two symbols, and a lookup table W:

-![lookup table](./lookup_table.png)
+![lookup table](./src/lookup_table.png)

 ### The Backward Algorithm
@@ -42,7 +42,7 @@ or some more sophisticated algorithms that rely on both W' and W:

 $$W = f(W, W')$$

 The following figure illustrates the backward pass of the lookup
-operator: ![lookup table training](./lookup_table_training.png)
+operator: ![lookup table training](./src/lookup_table_training.png)

 ## Distributed Storage Service
diff --git a/doc/design/lookup_table.png b/doc/fluid/design/dist_train/src/lookup_table.png
similarity index 100%
rename from doc/design/lookup_table.png
rename to doc/fluid/design/dist_train/src/lookup_table.png
diff --git a/doc/design/lookup_table_training.png b/doc/fluid/design/dist_train/src/lookup_table_training.png
similarity index 100%
rename from doc/design/lookup_table_training.png
rename to doc/fluid/design/dist_train/src/lookup_table_training.png
diff --git a/doc/fluid/design/motivation/fluid.md b/doc/fluid/design/motivation/fluid.md
index f78fa8c1914124f33b9730f918c8887ced4f8d9d..110b7d78bf12ac8328fb3a913e4386e75d63c995 100644
--- a/doc/fluid/design/motivation/fluid.md
+++ b/doc/fluid/design/motivation/fluid.md
@@ -103,7 +103,7 @@ In computability theory, a system of data-manipulation rules, such as a programm

 There are two ways to execute a Fluid program. When a program is executed, it creates a protobuf message [`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/paddle/framework/framework.proto#L145) that describes the process and is conceptually like an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree).

-There is a C++ class [`Executor`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.h), which runs a `ProgramDesc`, similar to how an interpreter runs a Python program.
+There is a C++ class [`Executor`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/executor.h), which runs a `ProgramDesc`, similar to how an interpreter runs a Python program.

 Fluid is moving towards the direction of a compiler, which is explained in [fluid_compiler.md](fluid_compiler.md).
diff --git a/paddle/fluid/framework/channel_test.cc b/paddle/fluid/framework/channel_test.cc
index edfb41c72489113d9803c2957baed1ce44f8296d..73be5cdbe2a1f5994ecee4c415e83962f50532fe 100644
--- a/paddle/fluid/framework/channel_test.cc
+++ b/paddle/fluid/framework/channel_test.cc
@@ -871,3 +871,67 @@ TEST(ChannelHolder, ChannelHolderDestroyUnblocksSendersTest) {
   ch->Reset(0);
   ChannelHolderDestroyUnblockSenders(ch, false);
 }
+
+// This tests closing a ChannelHolder many times.
+void ChannelHolderManyTimesClose(ChannelHolder *ch) {
+  const int num_threads = 15;
+  std::thread t[num_threads];
+  bool thread_ended[num_threads];
+
+  // Launches threads that try to send data to the channel.
+  for (size_t i = 0; i < num_threads / 3; i++) {
+    thread_ended[i] = false;
+    t[i] = std::thread(
+        [&](bool *ended) {
+          int data = 10;
+          ch->Send(&data);
+          *ended = true;
+        },
+        &thread_ended[i]);
+  }
+
+  // Launches threads that try to receive data from the channel.
+  for (size_t i = num_threads / 3; i < 2 * num_threads / 3; i++) {
+    thread_ended[i] = false;
+    t[i] = std::thread(
+        [&](bool *p) {
+          int data;
+          if (ch->Receive(&data)) {
+            EXPECT_EQ(data, 10);
+          }
+          *p = true;
+        },
+        &thread_ended[i]);
+  }
+
+  // Launches threads that try to close the channel.
+  for (size_t i = 2 * num_threads / 3; i < num_threads; i++) {
+    thread_ended[i] = false;
+    t[i] = std::thread(
+        [&](bool *p) {
+          if (!ch->IsClosed()) {
+            ch->close();
+          }
+          *p = true;
+        },
+        &thread_ended[i]);
+  }
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(100));  // wait
+
+  // Verify that all threads are unblocked
+  for (size_t i = 0; i < num_threads; i++) {
+    EXPECT_EQ(thread_ended[i], true);
+  }
+  EXPECT_TRUE(ch->IsClosed());
+  // delete the channel
+  delete ch;
+  for (size_t i = 0; i < num_threads; i++) t[i].join();
+}
+
+TEST(ChannelHolder, ChannelHolderManyTimesCloseTest) {
+  // Check for Buffered Channel
+  ChannelHolder *ch = new ChannelHolder();
+  ch->Reset(10);
+  ChannelHolderManyTimesClose(ch);
+}
diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt
index fba1612d10f0494f4ab06fabdd0e799a74dafd53..547d081006f1c28ba73cb02d38e36bb612cea494 100644
--- a/paddle/fluid/operators/math/CMakeLists.txt
+++ b/paddle/fluid/operators/math/CMakeLists.txt
@@ -43,7 +43,7 @@ math_library(sequence2batch)
 math_library(sequence_padding)
 math_library(sequence_pooling DEPS math_function)
 math_library(sequence_scale)
-math_library(softmax)
+math_library(softmax DEPS math_function)
 math_library(unpooling)
 math_library(vol2col)
diff --git a/paddle/fluid/operators/math/concat.cc b/paddle/fluid/operators/math/concat.cc
index b542143419e05e9baf29e9a2322447f32ddd9829..b672c79afd97e36894af647fd4bc6edfb885ff13 100644
--- a/paddle/fluid/operators/math/concat.cc
+++ b/paddle/fluid/operators/math/concat.cc
@@ -44,7 +44,7 @@ class ConcatFunctor {
       out_cols += t_cols;
       input_cols[i] = t_cols;
     }
-    auto& cpu_place = boost::get(context.GetPlace());
+    auto cpu_place = boost::get(context.GetPlace());

     // computation
     for (int k = 0; k < out_rows; ++k) {
@@ -87,7 +87,7 @@ class ConcatGradFunctor {
       input_cols += t_cols;
       output_cols[i] = t_cols;
     }
-    auto& cpu_place = boost::get(context.GetPlace());
+    auto cpu_place = boost::get(context.GetPlace());

     // computation
     for (int k = 0; k < input_rows; ++k) {
diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc
index d0de092947eb04a1b7d06dedea919f6b1094dd06..bd0bb2ee3b0252f47318c59d9940d8dd478723de 100644
--- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc
+++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc
@@ -48,20 +48,24 @@ class DoubleBufferReader : public framework::DecoratedReader {
   void start_thread() {
     buffer_ = framework::MakeChannel(kDoubleBufferSize);
-    std::thread prefetch([this] { PrefetchThreadFunc(); });
-    prefetch.detach();
+    prefetcher_ = std::thread([this] { PrefetchThreadFunc(); });
   }

   void ReadNext(std::vector* out) override;
   void ReInit() override;

-  ~DoubleBufferReader() { buffer_->Close(); }
+  ~DoubleBufferReader() {
+    buffer_->Close();
+    prefetcher_.join();
+    delete buffer_;
+  }

   bool HasNext() const override;

  private:
   void PrefetchThreadFunc();

+  std::thread prefetcher_;
   framework::Channel* buffer_;
   platform::Place place_;
   std::vector> ctxs_;
@@ -134,6 +138,8 @@ void DoubleBufferReader::ReadNext(std::vector* out) {
 void DoubleBufferReader::ReInit() {
   reader_->ReInit();
   buffer_->Close();
+  prefetcher_.join();
+  delete buffer_;
   start_thread();
 }
@@ -159,11 +165,12 @@ void DoubleBufferReader::PrefetchThreadFunc() {
     if (!buffer_->Send(&batch)) {
       VLOG(5) << "WARNING: The double buffer channel has been closed. The "
-                 "prefetch thread terminates.";
+                 "prefetch thread will terminate.";
       break;
     }
   }
   buffer_->Close();
+  VLOG(5) << "Prefetch thread terminates.";
 }

 bool DoubleBufferReader::HasNext() const {
diff --git a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc
index 70e2f587dc414a850ddc341b98f26ae54636755c..3a1f3805a0483c2f5eabdc7432556051d8308964 100644
--- a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc
+++ b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc
@@ -34,6 +34,9 @@ class ShuffleReader : public framework::DecoratedReader {
   }

   void ReadNext(std::vector* out) override {
+    if (!HasNext()) {
+      PADDLE_THROW("There is no next data!");
+    }
     if (iteration_pos_ >= buffer_.size()) {
       VLOG(10) << "Resetting shuffle buffer";
       ReadIntoBuffers();
@@ -50,7 +53,6 @@ class ShuffleReader : public framework::DecoratedReader {
     buffer_.clear();
     buffer_.reserve(buffer_size_);
     iteration_pos_ = 0;
-    PADDLE_ENFORCE(reader_->HasNext());
     for (size_t i = 0; i < buffer_size_; ++i) {
       if (!reader_->HasNext()) {
         break;
diff --git a/python/paddle/fluid/concurrency.py b/python/paddle/fluid/concurrency.py
index 535e881c42f675198a2679cb7974af64b65cc194..0fc4981a8e9da09f15e6d0a5e5c6761e01328876 100644
--- a/python/paddle/fluid/concurrency.py
+++ b/python/paddle/fluid/concurrency.py
@@ -131,7 +131,7 @@ def make_channel(dtype, capacity=0):
     return channel


-def channel_send(channel, value):
+def channel_send(channel, value, copy=False):
     """
     Sends a value through a channel variable. Used by an unbuffered or buffered
     channel to pass data from within or to a concurrent Go block, where
@@ -141,6 +141,8 @@ def channel_send(channel, value):
         channel (Variable|Channel): Channel variable created using
         `make_channel`.
         value (Variable): Value to send to channel
+        copy (bool): Copy data while sending to the channel. If False, the
+        data is moved; the input cannot be used after the move.
     Returns:
         Variable: The boolean status on whether or not the channel
         successfully sent the passed value.
@@ -162,11 +164,26 @@ def channel_send(channel, value):
         type=core.VarDesc.VarType.LOD_TENSOR,
         dtype=core.VarDesc.VarType.BOOL)

+    X = value
+
+    if copy is True:
+        copied_X = helper.create_variable(
+            name=unique_name.generate(value.name + '_copy'),
+            type=value.type,
+            dtype=value.dtype,
+            shape=value.shape,
+            lod_level=value.lod_level,
+            capacity=value.capacity)
+
+        assign_op = channel_send_block.append_op(
+            type="assign_op", inputs={"X": value}, outputs={"Out": copied_X})
+        X = copied_X
+
     channel_send_op = channel_send_block.append_op(
         type="channel_send",
         inputs={
             "Channel": channel,
-            "X": value,
+            "X": X,
         },
         outputs={"Status": status})
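
For reference, a minimal usage sketch of the new `copy` flag on `channel_send` (not part of this patch). It assumes the helpers in `concurrency.py` are importable as shown, that a dense Variable such as the output of `fluid.layers.fill_constant` exposes the `lod_level`/`capacity` attributes the copy path reads, and the channel capacity and constant value are illustrative only.

import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.concurrency import make_channel, channel_send

# A buffered channel of LoD tensors (capacity chosen arbitrarily here).
ch = make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=10)
data = fluid.layers.fill_constant(shape=[1], dtype='int32', value=10)

# copy=True appends an assign op that duplicates `data` before the send, so
# `data` stays usable afterwards; with the default copy=False the value is
# moved into the channel and must not be read again.
status = channel_send(ch, data, copy=True)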