diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc
index 6853b7ee5f9ec849784786261b5d7d4c462ba628..ef85ed69dbe87d4b5b1b1b5d6a04220a5266a635 100644
--- a/paddle/framework/lod_tensor.cc
+++ b/paddle/framework/lod_tensor.cc
@@ -270,10 +270,10 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
                  "Batch size should be divided by places size");
 
   std::vector<LoDTensor> lods;
-  for (int place_idx = 0; place_idx < places.size(); ++place_idx) {
-    int begin = place_idx * dims()[0] / places.size();
-    int end = (place_idx + 1) * dims()[0] / places.size();
-    auto src = Slice(begin, end);
+  for (size_t place_idx = 0; place_idx < places.size(); ++place_idx) {
+    size_t begin = place_idx * dims()[0] / places.size();
+    size_t end = (place_idx + 1) * dims()[0] / places.size();
+    auto src = Slice(static_cast<int>(begin), static_cast<int>(end));
 
     LoDTensor dst;
     dst.Resize(src.dims());
diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc
index 348356f28d645978a3a6581ecc525235abd325c1..077245cd83b2535b45927c8350ca7bc14c541d21 100644
--- a/paddle/operators/parallel_do_op.cc
+++ b/paddle/operators/parallel_do_op.cc
@@ -12,23 +12,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <thread>
 
 #include "paddle/framework/executor.h"
 #include "paddle/framework/op_registry.h"
+#include "paddle/framework/threadpool.h"
 
 namespace paddle {
 namespace operators {
 
-constexpr char kInputs[] = "inputs";
-constexpr char kParameters[] = "parameters";
-constexpr char kPlaces[] = "places";
+static constexpr char kInputs[] = "inputs";
+static constexpr char kParameters[] = "parameters";
+static constexpr char kPlaces[] = "places";
 
-constexpr char kOutputs[] = "outputs";
-constexpr char kParallelScopes[] = "parallel_scopes";
+static constexpr char kOutputs[] = "outputs";
+static constexpr char kParallelScopes[] = "parallel_scopes";
 
-constexpr char kParallelBlock[] = "sub_block";
+static constexpr char kParallelBlock[] = "sub_block";
 
 // using ParallelScopeVar = std::vector<framework::Scope *>;
 using LoDTensor = framework::LoDTensor;
@@ -85,7 +85,8 @@ class ParallelDoOp : public framework::OperatorBase {
     SplitTensorAndMoveTensorToScopes(scope, sub_scopes, places,
                                      Inputs(kInputs));
 
-    std::vector<std::thread> workers;
+    std::vector<std::future<void>> workers;
+    workers.reserve(places.size());
     for (size_t place_idx = 0; place_idx < places.size(); ++place_idx) {
       VLOG(3) << "Run " << place_idx;
 
@@ -93,26 +94,27 @@
       auto &place = places[place_idx];
       auto *cur_scope = sub_scopes[place_idx];
       // copy parameter
-      if (dev_ctx.GetPlace() != place) {
+      // some version of boost lacks != for boost::variant
+      if (!(dev_ctx.GetPlace() == place)) {
        PADDLE_THROW("Not Implemented");
       }
 
-      // execute
-      workers.push_back(std::thread([program, cur_scope, place, block] {
-        auto executor = framework::Executor(place);
+      workers.emplace_back(framework::Async([program, cur_scope, place, block] {
+        framework::Executor executor(place);
         executor.Run(*program, cur_scope, block->ID(),
                      false /*create_local_scope*/);
       }));
     }
     for (auto &worker : workers) {
-      worker.join();
+      worker.wait();
     }
 
     // merge output
     for (auto &o_name : Outputs(kOutputs)) {
       std::vector<const framework::LoDTensor *> lod_tensors;
+      lod_tensors.reserve(sub_scopes.size());
       for (auto *sub_scope : sub_scopes) {
-        lod_tensors.push_back(&sub_scope->FindVar(o_name)->Get<LoDTensor>());
+        lod_tensors.emplace_back(&sub_scope->FindVar(o_name)->Get<LoDTensor>());
       }
 
       auto *lod_tensor_to_be_merged =
@@ -177,7 +179,7 @@ class ParallelDoGradOp : public OperatorBase {
     }
 
     // exe run
-    std::vector<std::thread> workers;
+    std::vector<std::future<void>> workers;
     for (size_t place_idx = 0; place_idx < places.size(); ++place_idx) {
       VLOG(3) << "Run " << place_idx;
 
@@ -185,14 +187,14 @@
       auto &place = places[place_idx];
       auto *cur_scope = sub_scopes[place_idx];
       // execute
-      workers.push_back(std::thread([program, cur_scope, place, block] {
-        auto executor = framework::Executor(place);
+      workers.emplace_back(framework::Async([program, cur_scope, place, block] {
+        framework::Executor executor(place);
         executor.Run(*program, cur_scope, block->ID(),
                      false /*create_local_scope*/);
       }));
     }
     for (auto &worker : workers) {
-      worker.join();
+      worker.wait();
     }
 
     // merge grad
diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py
index ac60bf543600008fd5339c1a378951374afc4ad6..88fe19da5e2c2df7f7eed7b26261ec155f0013f7 100644
--- a/python/paddle/v2/fluid/backward.py
+++ b/python/paddle/v2/fluid/backward.py
@@ -205,6 +205,7 @@ def _append_backward_ops_(target,
         # Getting op's corresponding grad_op
         grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
             op.desc, no_grad_dict[block.idx], grad_sub_block_list)
+        grad_op_descs.extend(grad_op_desc)
 
         grad_to_var.update(op_grad_to_var)
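
Note on the lod_tensor.cc hunk: SplitLoDTensor divides the batch (first) dimension evenly across the given places; the loop indices are widened to size_t while Slice still takes int arguments, hence the static_cast. A minimal standalone sketch of that index arithmetic, assuming only the standard library (SplitRanges is an illustrative name, not a Paddle API):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Compute [begin, end) row ranges that split `batch_size` rows evenly
    // across `num_places` devices, mirroring the arithmetic in SplitLoDTensor
    // (the op enforces batch_size % num_places == 0 before splitting).
    std::vector<std::pair<int, int>> SplitRanges(size_t batch_size,
                                                 size_t num_places) {
      std::vector<std::pair<int, int>> ranges;
      ranges.reserve(num_places);
      for (size_t place_idx = 0; place_idx < num_places; ++place_idx) {
        size_t begin = place_idx * batch_size / num_places;
        size_t end = (place_idx + 1) * batch_size / num_places;
        ranges.emplace_back(static_cast<int>(begin), static_cast<int>(end));
      }
      return ranges;
    }

    // Example: SplitRanges(8, 4) yields {0,2}, {2,4}, {4,6}, {6,8}.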
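
Note on the parallel_do_op.cc hunks: raw std::thread workers are replaced with futures handed back by framework::Async (declared in paddle/framework/threadpool.h), so each worker is wait()ed on instead of join()ed. A minimal sketch of that future-based pattern, using std::async as a stand-in for framework::Async; it illustrates the wait-on-futures idiom only and is not Paddle's thread-pool implementation:

    #include <future>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<std::future<void>> workers;
      workers.reserve(4);
      for (int place_idx = 0; place_idx < 4; ++place_idx) {
        // std::async stands in for framework::Async here; both return a
        // std::future<void> that becomes ready when the submitted task finishes.
        workers.emplace_back(std::async(std::launch::async, [place_idx] {
          std::cout << "running sub-block on place " << place_idx << "\n";
        }));
      }
      // wait() replaces std::thread::join(): block until every task completes.
      for (auto &worker : workers) {
        worker.wait();
      }
      return 0;
    }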