Commit 0ed3f359 authored by Yang Yang

pass parallel_do cbow

Parent af8cb820
@@ -31,6 +31,7 @@ static constexpr char kParallelScopes[] = "parallel_scopes";
 static constexpr char kParallelBlock[] = "sub_block";

 using LoDTensor = framework::LoDTensor;
+using SelectedRows = framework::SelectedRows;

 static void SplitTensorAndMoveTensorToScopes(
     const framework::Scope &scope, std::vector<framework::Scope *> *sub_scopes,
@@ -64,6 +65,30 @@ static void SplitTensorAndMoveTensorToScopes(
   }
 }

+inline void CopyOrShare(const framework::Variable& src,
+                        const platform::Place& dst_place,
+                        framework::Variable* dst) {
+  if (src.IsType<LoDTensor>()) {
+    if (src.Get<LoDTensor>().place() == dst_place) {
+      dst->GetMutable<LoDTensor>()->ShareDataWith(src.Get<LoDTensor>());
+    } else {
+      Copy(src.Get<LoDTensor>(), dst_place, dst->GetMutable<LoDTensor>());
+    }
+  } else if (src.IsType<SelectedRows>()) {
+    auto &src_sr = src.Get<SelectedRows>();
+    auto *dst_sr = dst->GetMutable<SelectedRows>();
+    dst_sr->set_rows(src_sr.rows());
+    dst_sr->set_height(src_sr.height());
+    Copy(src_sr.value(), dst_place, dst_sr->mutable_value());
+    // if (src_sr.value().place() == dst_place) {
+    //   dst_sr->mutable_value()->ShareDataWith(src_sr.value());
+    // } else {
+    // }
+  } else {
+    PADDLE_THROW("Expect LoDTensor/SelectedRows, get %s", src.Type().name());
+  }
+}
+
 void WaitOnPlace(const platform::Place place) {
   platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
   auto &dev_ctx = *pool.Get(place);
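Note: the new `CopyOrShare` helper aliases the underlying buffer when the source tensor already lives on `dst_place`, and falls back to a real copy otherwise (the `SelectedRows` branch always copies for now; its share path is left commented out). A minimal standalone sketch of the same pattern, using hypothetical `Place`/`Tensor` stand-ins rather than Paddle's `platform::Place`/`framework::LoDTensor`:

```cpp
// Standalone sketch of the copy-or-share pattern above (not Paddle code):
// alias the source buffer when it already lives on the destination place,
// otherwise make a real copy. Place and Tensor are hypothetical stand-ins.
#include <cassert>
#include <memory>
#include <vector>

enum class Place { kCPU, kGPU0, kGPU1 };

struct Tensor {
  std::shared_ptr<std::vector<float>> data;  // shared_ptr so aliasing is cheap
  Place place = Place::kCPU;
};

// Share when no transfer is needed; copy (simulated here) otherwise.
void CopyOrShare(const Tensor &src, Place dst_place, Tensor *dst) {
  if (src.place == dst_place) {
    dst->data = src.data;  // zero-copy: both tensors alias one buffer
    dst->place = src.place;
  } else {
    dst->data = std::make_shared<std::vector<float>>(*src.data);  // deep copy
    dst->place = dst_place;  // stands in for a device-to-device memcpy
  }
}

int main() {
  Tensor a{std::make_shared<std::vector<float>>(3, 1.0f), Place::kGPU0};
  Tensor shared, copied;
  CopyOrShare(a, Place::kGPU0, &shared);  // same place -> alias
  CopyOrShare(a, Place::kCPU, &copied);   // different place -> copy
  assert(shared.data == a.data);
  assert(copied.data != a.data);
  return 0;
}
```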
@@ -149,6 +174,7 @@ class ParallelDoOp : public framework::OperatorBase {
       lod_tensor_to_be_merged->MergeLoDTensor(lod_tensors, dev_ctx.GetPlace());
     }
     WaitOnPlaces(places);
+    LOG(INFO) << "End of ParallelGradDo";
   }
 };
@@ -210,21 +236,27 @@ class ParallelDoGradOp : public framework::OperatorBase {
     }
     WaitOnPlaces(places);

-    // merge grad
+    AccumulateGrad(scope, place, sub_scopes, places);
+    LOG(INFO) << "End of ParallelDoGrad";
+  }
+
+  void AccumulateGrad(const framework::Scope &scope,
+                      const platform::Place &place,
+                      const std::vector<framework::Scope *> &sub_scopes,
+                      const platform::PlaceList &places) const {
     for (auto &s : Outputs(framework::GradVarName(kParameters))) {
-      auto &result = sub_scopes[0]->FindVar(s)->Get<LoDTensor>();
       std::string tmp_name;
-      auto *tmp = sub_scopes[0]->Var(&tmp_name)->GetMutable<LoDTensor>();
+      auto *tmp = sub_scopes[0]->Var(&tmp_name);
+      LOG(INFO) << "---" << s;

       for (size_t i = 1; i < sub_scopes.size(); ++i) {
-        auto &tensor_to_merge = sub_scopes[i]->FindVar(s)->Get<LoDTensor>();
         if (!(places[i] == places[0])) {
-          framework::Copy(tensor_to_merge, places[0], tmp);
+          LOG(INFO) << "---";
+          CopyOrShare(*sub_scopes[i]->FindVar(s), places[0], tmp);
           WaitOnPlace(places[0]);
-        } else {
-          tmp->ShareDataWith(tensor_to_merge);
         }

+        LOG(INFO) << "---";
         auto sum_op = framework::OpRegistry::CreateOp(
             "sum", {{"X", {s, tmp_name}}}, {{"Out", {s}}},
             framework::AttributeMap{});
@@ -232,8 +264,8 @@ class ParallelDoGradOp : public framework::OperatorBase {
         WaitOnPlace(places[0]);
       }

-      VLOG(3) << result;
-      framework::Copy(result, place, scope.FindVar(s)->GetMutable<LoDTensor>());
+      LOG(INFO) << "---";
+      CopyOrShare(*sub_scopes[0]->FindVar(s), place, scope.FindVar(s));
     }
     WaitOnPlaces(places);
   }
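Note: the extracted `AccumulateGrad` gathers each replica's parameter gradient onto `places[0]` via `CopyOrShare`, runs a `sum` op against the running total in `sub_scopes[0]`, and finally copies or shares the result into the parent scope. A standalone sketch of that sequential accumulation, with hypothetical names (`Grad`, `SumInto`) in place of the operator machinery:

```cpp
// Standalone sketch of the accumulation loop in AccumulateGrad (not Paddle
// code): gather each replica's gradient onto device 0, sum sequentially,
// then publish the total. Grad and SumInto are hypothetical stand-ins.
#include <cstdio>
#include <vector>

using Grad = std::vector<float>;

// Stands in for the "sum" operator run on sub_scopes[0]: out += tmp.
void SumInto(Grad *out, const Grad &tmp) {
  for (size_t j = 0; j < out->size(); ++j) (*out)[j] += tmp[j];
}

int main() {
  // One gradient per device sub-scope; replica 0 also holds the accumulator.
  std::vector<Grad> replica_grads = {{1, 1}, {2, 2}, {3, 3}};
  Grad &acc = replica_grads[0];

  for (size_t i = 1; i < replica_grads.size(); ++i) {
    // In the operator this is CopyOrShare(...) into the temporary variable
    // tmp_name, followed by WaitOnPlace(places[0]) before the sum runs.
    Grad tmp = replica_grads[i];
    SumInto(&acc, tmp);
  }

  // CopyOrShare(*sub_scopes[0]->FindVar(s), place, scope.FindVar(s)) then
  // publishes the accumulated gradient to the parent scope.
  std::printf("accumulated: %.0f %.0f\n", acc[0], acc[1]);  // 6 6
  return 0;
}
```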
@@ -289,7 +321,7 @@ class ParallelDoGradOpShapeInference : public framework::InferShapeBase {
     PADDLE_ENFORCE(ctx->HasInputs(kParameters));
     PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(kParameters)));
-    PADDLE_ENFORCE(ctx->HasInput(kInputs));
+    PADDLE_ENFORCE(ctx->HasInputs(kInputs));

     for (auto &s : output) {
       PADDLE_ENFORCE(ctx->HasInputs(s));
...
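Note: the shape-inference fix switches `HasInput(kInputs)` to `HasInputs(kInputs)` because `kInputs` is a duplicable slot that can hold several variables, so every entry needs checking, not just a single one. A rough sketch of the distinction, using a hypothetical context type rather than Paddle's `InferShapeContext`:

```cpp
// Hypothetical illustration (not Paddle's API): HasInput checks a single-var
// slot, HasInputs checks every variable bound to a duplicable slot.
#include <map>
#include <string>
#include <vector>

struct Ctx {
  std::map<std::string, std::vector<bool>> inputs;  // slot -> per-var presence

  bool HasInput(const std::string &slot) const {  // single-variable slot
    auto it = inputs.find(slot);
    return it != inputs.end() && it->second.size() == 1 && it->second[0];
  }

  bool HasInputs(const std::string &slot) const {  // duplicable (list) slot
    auto it = inputs.find(slot);
    if (it == inputs.end() || it->second.empty()) return false;
    for (bool present : it->second)
      if (!present) return false;  // every bound variable must exist
    return true;
  }
};

int main() {
  Ctx ctx;
  ctx.inputs["X"] = {true, true};  // duplicable slot with two present vars
  return ctx.HasInputs("X") ? 0 : 1;  // HasInput("X") would be false here
}
```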
@@ -270,6 +270,7 @@ class ParallelDo(object):
                 for in_var_name in op.input(iname):
                     if in_var_name not in local_inputs:
                         params.append(in_var_name)
+        params = list(set(params))

         return [parent_block.var(name) for name in params]
...
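Note: on the Python side, `params = list(set(params))` drops duplicate parameter names collected across ops; `set()` removes duplicates but does not preserve insertion order. If ordering ever matters downstream, a first-seen-order dedup is cheap, sketched here in C++ with hypothetical names:

```cpp
// Sketch of the dedup the Python one-liner performs. list(set(params))
// removes duplicates but may reorder; this variant keeps first-seen order.
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

std::vector<std::string> DedupKeepOrder(const std::vector<std::string> &in) {
  std::unordered_set<std::string> seen;
  std::vector<std::string> out;
  for (const auto &name : in) {
    if (seen.insert(name).second) out.push_back(name);  // first occurrence only
  }
  return out;
}

int main() {
  for (const auto &p : DedupKeepOrder({"w", "b", "w", "emb", "b"}))
    std::cout << p << " ";  // prints: w b emb
  std::cout << "\n";
  return 0;
}
```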