Commit 1cb96359, authored by typhoonzero

fix dist train transpiler bugs

Parent: bff0cbfc
@@ -49,14 +49,17 @@ class SendOp : public framework::OperatorBase {
     std::vector<std::string> epmap = Attr<std::vector<std::string>>("epmap");
     // TODO(typhoonzero): use async calls to send multiple variables asynchronously.
     for (size_t i = 0; i < ins.size(); ++i) {
+      VLOG(3) << "sending " << ins[i];
       bool ret = client_map_[epmap[i]]->SendVariable(scope, ins[i]);
       if (!ret) {
         LOG(ERROR) << "send variable error: " << ins[i];
       }
     }
+    VLOG(3) << "waiting batch ";
     // TODO(typhoonzero): support async optimization
     client_map_[epmap[0]]->Wait();
     for (size_t i = 0; i < outs.size(); ++i) {
+      VLOG(3) << "getting " << outs[i];
       bool ret = client_map_[epmap[i]]->GetVariable(scope, outs[i]);
       if (!ret) {
         LOG(ERROR) << "GetVariable error: " << outs[i];
...
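
Aside for reviewers: the VLOG(3) trace lines added above are only emitted when glog verbosity is raised, e.g. by exporting GLOG_v=3 before launching (assuming the usual glog setup PaddlePaddle uses); at the default verbosity the send/wait/get sequence logs nothing.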
@@ -95,7 +95,9 @@ class DistributeTranspiler:
        """
        if program is None:
            program = default_main_program()
+       self.program = program
        self.trainers = trainers
+       self.optimize_ops = optimize_ops
        self._optimize_distributed(
            optimize_ops,
            program,
@@ -156,9 +158,10 @@ class DistributeTranspiler:
            attrs={"endpoints": pserver_endpoints,
                   "epmap": epmap})

-   def get_trainer_program(optimize_ops, program):
+   def get_trainer_program(self):
        # remove optimize ops and add a send op to main_program
-       program.global_block().delete_ops(optimize_ops)
+       self.program.global_block().delete_ops(self.optimize_ops)
+       return self.program

    def _create_var_for_trainers(self, block, var, trainers):
        var_list = []
@@ -210,7 +213,6 @@ class DistributeTranspiler:
        if opt_op.inputs.has_key("Grad"):
            if opt_op.inputs["Grad"].name in grad_var_names:
-               print "appending ", opt_op.type, opt_op.inputs
                optimize_sub_program.global_block().append_op(
                    type=opt_op.type,
                    inputs=opt_op.inputs,
...
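
For context, a minimal sketch of how the reworked transpiler API is meant to be driven after this change. The import path and the optimize_ops/params_grads values are assumptions drawn from fluid examples of this era, not part of the patch itself:

    import paddle.v2.fluid as fluid

    t = fluid.DistributeTranspiler()
    # transpile() now records program and optimize_ops on self, so
    # get_trainer_program() can be called with no arguments.
    t.transpile(optimize_ops, params_grads,
                pservers="127.0.0.1:6174", trainers=2)
    # Returns self.program with the optimize ops deleted.
    trainer_prog = t.get_trainer_program()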
@@ -663,7 +663,7 @@ class Block(object):
            end = list(self.ops).index(ops[-1])
        except Exception, e:
            raise e
-       self.desc.remove_op(start, end)
+       self.desc.remove_op(start, end + 1)

    def prepend_op(self, *args, **kwargs):
        op_desc = self.desc.prepend_op()
...
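
The remove_op fix above is a classic off-by-one: end holds the index of the last op to delete, while the underlying remove_op evidently treats its arguments as a half-open range [start, end), the same convention as Python slicing, so the last optimize op was left in the program. A standalone sketch of the bug and the fix, using plain lists rather than Paddle:

    ops = ["scale", "sgd", "sgd"]   # stand-ins for the ops to delete
    start = 0
    end = len(ops) - 1              # index of the LAST op, i.e. 2

    buggy = list(ops)
    del buggy[start:end]            # half-open range leaves the last op behind
    assert buggy == ["sgd"]

    fixed = list(ops)
    del fixed[start:end + 1]        # end + 1 brings the last op into the range
    assert fixed == []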
@@ -38,35 +38,43 @@ train_reader = paddle.batch(
 place = fluid.CPUPlace()
 exe = fluid.Executor(place)
 t = fluid.DistributeTranspiler()
+# all parameter server endpoints list for splitting parameters
 pserver_endpoints = os.getenv("PSERVERS")
+# server endpoint for current node
+current_endpoint = os.getenv("SERVER_ENDPOINT")
+# run as trainer or parameter server
 training_role = os.getenv("TRAINING_ROLE",
                           "TRAINER")  # get the training role: trainer/pserver

-t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=1)
+t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)

 if training_role == "PSERVER":
-    pserver_prog = t.get_pserver_program(pserver_endpoints, optimize_ops)
+    if not current_endpoint:
+        print("need env SERVER_ENDPOINT")
+        exit(1)
+    pserver_prog = t.get_pserver_program(current_endpoint, optimize_ops)
     exe.run(fluid.default_startup_program())
     exe.run(pserver_prog)
 elif training_role == "TRAINER":
+    trainer_prog = t.get_trainer_program()
     feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
     exe.run(fluid.default_startup_program())

     for pass_id in range(PASS_NUM):
         accuracy.reset(exe)
+        batch_id = 0
         for data in train_reader():
-            loss, acc = exe.run(fluid.default_main_program(),
+            loss, acc = exe.run(trainer_prog,
                                 feed=feeder.feed(data),
                                 fetch_list=[avg_cost] + accuracy.metrics)
             pass_acc = accuracy.eval(exe)
-            # print loss, acc
-            if loss < 10.0 and pass_acc > 0.9:
-                # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
-                exit(0)
+            if batch_id % 100 == 0:
+                print("batch_id %d, loss: %f, acc: %f" %
+                      (batch_id, loss, pass_acc))
+            batch_id += 1
         pass_acc = accuracy.eval(exe)
         print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc))
 else:
     print("environment var TRAINING_ROLE should be TRAINER or PSERVER")
+    exit(1)
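
To exercise the updated test script, one would typically start one process per role, with the endpoint values below being placeholders: for the parameter server, set PSERVERS=127.0.0.1:6174, SERVER_ENDPOINT=127.0.0.1:6174 and TRAINING_ROLE=PSERVER before running the script; then launch two trainer processes (matching trainers=2 in the transpile call) with the same PSERVERS value and TRAINING_ROLE=TRAINER.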