Unverified · Commit 587f3dd3, authored by Xin Pan, committed by GitHub

Merge pull request #14027 from PaddlePaddle/revert-13960-fix_1_release_1.0.0

Revert "Bug fix in release 1.0.0"
@@ -70,12 +70,6 @@ class FillConstantOp : public framework::OperatorBase {
   }
 };
 
-class FillConstantOpVarTypeInference : public framework::VarTypeInference {
- public:
-  void operator()(const framework::OpDesc &op_desc,
-                  framework::BlockDesc *block) const override {}
-};
-
 class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
@@ -108,5 +102,4 @@ Fill up a variable with specified constant value.
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fill_constant, ops::FillConstantOp,
                   ops::FillConstantInferShape, ops::FillConstantOpMaker,
-                  paddle::framework::EmptyGradOpMaker,
-                  ops::FillConstantOpVarTypeInference);
+                  paddle::framework::EmptyGradOpMaker);
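For orientation only: the operator whose registration changes above is the one emitted by the Python-level fill_constant layer. A minimal usage sketch of that layer (Fluid 1.x-era API; the shape and value are arbitrary, not taken from this patch):

import paddle.fluid as fluid

# Builds a fill_constant op into the default main program; after the revert the
# op is registered without the (removed) FillConstantOpVarTypeInference hook.
x = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.0)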
@@ -1522,17 +1522,13 @@ class Program(object):
         >>> with program.lr_schedule_guard():
         >>>     lr = lr * decay
         """
-        tmp_role = self._current_role
-        tmp_var = self._op_role_var
-
         OpRole = core.op_proto_and_checker_maker.OpRole
         self._current_role = OpRole.LRSched
         # TODO(typhoonzero): how to set target learning rate var
         self._op_role_var = []
         yield
-        self._op_role_var = tmp_var
-        self._current_role = tmp_role
+        self._op_role_var = []
+        self._current_role = OpRole.Forward
 
     def __str__(self):
         """
......
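Reviewer note: the behavioral difference in the hunk above is whether the guard restores the previous op role on exit or unconditionally resets it. A toy sketch of the two exit strategies (plain Python with hypothetical names, not Paddle API):

from contextlib import contextmanager

@contextmanager
def role_guard(state, new_role):
    saved = state["role"]        # what the removed tmp_role/tmp_var lines captured
    state["role"] = new_role     # e.g. "LRSched" while LR-schedule ops are built
    try:
        yield
    finally:
        state["role"] = saved    # the reverted code hard-codes "Forward" here instead

state = {"role": "Backward"}
with role_guard(state, "LRSched"):
    pass                         # ops created here would carry the LRSched role
assert state["role"] == "Backward"  # restore semantics; the reverted version would leave "Forward"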
@@ -15,7 +15,7 @@
 from __future__ import print_function
 import re
 from collections import defaultdict
-from paddle.fluid.framework import Program, Variable, name_scope, default_main_program
+from paddle.fluid.framework import Program, Variable, name_scope
 from . import framework
 from . import layers
 from .backward import append_backward
@@ -111,7 +111,6 @@ class Optimizer(object):
         if param_lr == 1.0:
             return self._global_learning_rate()
         else:
-            with default_main_program()._lr_schedule_guard():
-                return self._global_learning_rate() * param_lr
+            return self._global_learning_rate() * param_lr
 
     def _create_accumulators(self, block, parameters):
......
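Reviewer note: param_lr in the hunk above is the per-parameter multiplier a layer can attach to its weight; the optimizer scales the global learning rate by it. A small sketch of where that multiplier comes from (Fluid 1.x-era API; layer sizes are arbitrary):

import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[8], dtype="float32")
# The fc weight carries learning_rate=0.5, so its effective step size becomes
# global_lr * 0.5, which is the multiplication shown in the hunk above.
y = fluid.layers.fc(input=x,
                    size=10,
                    param_attr=fluid.ParamAttr(learning_rate=0.5))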
@@ -81,10 +81,7 @@ def get_optimizer():
     return optimizer
 
 
-def train_network(batch_size,
-                  is_distributed=False,
-                  is_sparse=False,
-                  is_self_contained_lr=False):
+def train_network(batch_size, is_distributed=False, is_sparse=False):
     # query
     q = fluid.layers.data(
         name="query_ids", shape=[1], dtype="int64", lod_level=1)
@@ -96,9 +93,7 @@ def train_network(batch_size,
         param_attr=fluid.ParamAttr(
             initializer=fluid.initializer.Constant(value=0.01),
             name="__emb__",
-            learning_rate=emb_lr) if is_self_contained_lr else fluid.ParamAttr(
-                initializer=fluid.initializer.Constant(value=0.01),
-                name="__emb__"),
+            learning_rate=emb_lr),
         is_sparse=is_sparse)
     ## vsum
     q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
@@ -124,9 +119,7 @@ def train_network(batch_size,
         param_attr=fluid.ParamAttr(
             initializer=fluid.initializer.Constant(value=0.01),
             name="__emb__",
-            learning_rate=emb_lr) if is_self_contained_lr else fluid.ParamAttr(
-                initializer=fluid.initializer.Constant(value=0.01),
-                name="__emb__"),
+            learning_rate=emb_lr),
         is_sparse=is_sparse)
     ## vsum
     pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
@@ -151,9 +144,7 @@ def train_network(batch_size,
         param_attr=fluid.ParamAttr(
             initializer=fluid.initializer.Constant(value=0.01),
             name="__emb__",
-            learning_rate=emb_lr) if is_self_contained_lr else fluid.ParamAttr(
-                initializer=fluid.initializer.Constant(value=0.01),
-                name="__emb__"),
+            learning_rate=emb_lr),
         is_sparse=is_sparse)
     ## vsum
     nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
@@ -229,10 +220,7 @@ class TestDistSimnetBow2x2(TestDistRunnerBase):
     def get_model(self, batch_size=2):
         # Train program
         avg_cost, acc, predict = \
-            train_network(batch_size,
-                          bool(int(os.environ["IS_DISTRIBUTED"])),
-                          bool(int(os.environ["IS_SPARSE"])),
-                          bool(int(os.environ["IS_SELF_CONTAINED_LR"])))
+            train_network(batch_size, bool(int(os.environ["IS_DISTRIBUTED"])), bool(int(os.environ["IS_SPARSE"])))
 
         inference_program = fluid.default_main_program().clone()
......
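Reviewer note: all three embedding calls above pass name="__emb__", which is how Fluid shares a single weight across the query, positive-title and negative-title lookups; the revert only drops the alternate ParamAttr that omitted learning_rate. A standalone sketch of that sharing trick (vocabulary size, dimension and emb_lr are made-up values, not from this test):

import paddle.fluid as fluid

q = fluid.layers.data(name="q", shape=[1], dtype="int64", lod_level=1)
pt = fluid.layers.data(name="pt", shape=[1], dtype="int64", lod_level=1)
emb_lr = 5.0  # assumed per-parameter LR multiplier, mirroring emb_lr in the test
shared = fluid.ParamAttr(
    name="__emb__",
    initializer=fluid.initializer.Constant(value=0.01),
    learning_rate=emb_lr)
# Both lookups resolve to the same "__emb__" parameter because of the shared name.
q_emb = fluid.layers.embedding(input=q, size=[1000, 64], param_attr=shared)
pt_emb = fluid.layers.embedding(input=pt, size=[1000, 64], param_attr=shared)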
@@ -25,11 +25,7 @@ class TestDistSimnetBowDense2x2(TestDistBase):
         self._enforce_place = "CPU"
 
     def test_simnet_bow(self):
-        need_envs = {
-            "IS_DISTRIBUTED": '0',
-            "IS_SPARSE": '0',
-            'IS_SELF_CONTAINED_LR': '1'
-        }
+        need_envs = {"IS_DISTRIBUTED": '0', "IS_SPARSE": '0'}
         self.check_with_place(
             "dist_simnet_bow.py",
             delta=1e-5,
@@ -43,11 +39,7 @@ class TestDistSimnetBow2x2DenseAsync(TestDistBase):
         self._enforce_place = "CPU"
 
     def test_simnet_bow(self):
-        need_envs = {
-            "IS_DISTRIBUTED": '0',
-            "IS_SPARSE": '0',
-            'IS_SELF_CONTAINED_LR': '1'
-        }
+        need_envs = {"IS_DISTRIBUTED": '0', "IS_SPARSE": '0'}
         self.check_with_place(
             "dist_simnet_bow.py",
             delta=100,
@@ -61,11 +53,7 @@ class TestDistSimnetBowSparse2x2(TestDistBase):
         self._enforce_place = "CPU"
 
     def test_simnet_bow(self):
-        need_envs = {
-            "IS_DISTRIBUTED": '0',
-            "IS_SPARSE": '1',
-            'IS_SELF_CONTAINED_LR': '1'
-        }
+        need_envs = {"IS_DISTRIBUTED": '0', "IS_SPARSE": '1'}
         self.check_with_place(
             "dist_simnet_bow.py",
             delta=1e-5,
@@ -79,11 +67,7 @@ class TestDistSimnetBow2x2SparseAsync(TestDistBase):
         self._enforce_place = "CPU"
 
     def test_simnet_bow(self):
-        need_envs = {
-            "IS_DISTRIBUTED": '0',
-            "IS_SPARSE": '1',
-            'IS_SELF_CONTAINED_LR': '1'
-        }
+        need_envs = {"IS_DISTRIBUTED": '0', "IS_SPARSE": '1'}
         self.check_with_place(
             "dist_simnet_bow.py",
             delta=100,
@@ -91,59 +75,5 @@ class TestDistSimnetBow2x2SparseAsync(TestDistBase):
             need_envs=need_envs)
 
 
-class TestDistSimnetBow2x2LookupTableSync(TestDistBase):
-    def _setup_config(self):
-        self._sync_mode = True
-        self._enforce_place = "CPU"
-
-    def test_simnet_bow(self):
-        need_envs = {
-            "IS_DISTRIBUTED": '1',
-            "IS_SPARSE": '1',
-            'IS_SELF_CONTAINED_LR': '1'
-        }
-        self.check_with_place(
-            "dist_simnet_bow.py",
-            delta=1e-5,
-            check_error_log=False,
-            need_envs=need_envs)
-
-
-class TestDistSimnetBow2x2LookupTableAsync(TestDistBase):
-    def _setup_config(self):
-        self._sync_mode = False
-        self._enforce_place = "CPU"
-
-    def test_simnet_bow(self):
-        need_envs = {
-            "IS_DISTRIBUTED": '1',
-            "IS_SPARSE": '1',
-            'IS_SELF_CONTAINED_LR': '1'
-        }
-        self.check_with_place(
-            "dist_simnet_bow.py",
-            delta=100,
-            check_error_log=False,
-            need_envs=need_envs)
-
-
-class TestDistSimnetBow2x2LookupTableNotContainLRSync(TestDistBase):
-    def _setup_config(self):
-        self._sync_mode = True
-        self._enforce_place = "CPU"
-
-    def test_simnet_bow(self):
-        need_envs = {
-            "IS_DISTRIBUTED": '1',
-            "IS_SPARSE": '1',
-            'IS_SELF_CONTAINED_LR': '0'
-        }
-        self.check_with_place(
-            "dist_simnet_bow.py",
-            delta=1e-5,
-            check_error_log=False,
-            need_envs=need_envs)
-
-
 if __name__ == "__main__":
     unittest.main()
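Reviewer note: dropping 'IS_SELF_CONTAINED_LR' removes one test dimension; the remaining flags still reach the model script as environment variables, which is why the get_model hunk earlier reads os.environ. A minimal sketch of the consuming side, mirroring that call (the default values here are an assumption for illustration):

import os

# The test harness exports each need_envs entry to the launched trainer process;
# the model script converts the '0'/'1' strings back to booleans.
is_distributed = bool(int(os.environ.get("IS_DISTRIBUTED", "0")))
is_sparse = bool(int(os.environ.get("IS_SPARSE", "0")))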
@@ -1118,7 +1118,6 @@ to transpile() call.")
     def _split_table_grad_and_add_send_vars(self, program, pserver_endpoints):
         # 2. add split_ids_op and send_op to send gradient to pservers
         # there should only be one table_name
-        all_ops = program.global_block().ops
         table_grad_name = grad_var_name(self.table_name)
@@ -1143,7 +1142,7 @@ to transpile() call.")
                 if self.sync_mode else []
             },
             attrs={
-                "sync_mode": not self.sync_mode,
+                "sync_mode": self.sync_mode,
                 "epmap": pserver_endpoints,
                 RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
                 OP_ROLE_VAR_ATTR_NAME: [
@@ -1189,15 +1188,7 @@ to transpile() call.")
     def _create_table_optimize_block(self, pserver_index, pserver_program,
                                      pre_block_idx, grad_to_block_id):
         # STEP: create table optimize block
-        table_opt_block = pserver_program._create_block(pre_block_idx)
         # create table param and grad var in pserver program
-        # create table optimize block in pserver program
-        table_opt_op = [
-            op for op in self.optimize_ops
-            if 'Param' in op.input_names and op.input("Param")[0] ==
-            self.table_name
-        ][0]
-
         origin_param_var = self.origin_program.global_block().vars[
             self.table_name]
@@ -1213,16 +1204,19 @@ to transpile() call.")
             dtype=origin_param_var.dtype,
             type=core.VarDesc.VarType.SELECTED_ROWS,
             persistable=True)
 
         # parameter must be selected rows
         param_var.desc.set_type(core.VarDesc.VarType.SELECTED_ROWS)
         grad_var = pserver_program.global_block()._clone_variable(
             self.origin_program.global_block().vars[grad_var_name(
                 self.table_name)])
 
-        lr_var = pserver_program.global_block()._clone_variable(
-            self.origin_program.global_block().vars[table_opt_op.input(
-                "LearningRate")[0]])
+        # create table optimize block in pserver program
+        table_opt_op = [
+            op for op in self.optimize_ops
+            if 'Param' in op.input_names and op.input("Param")[0] ==
+            self.table_name
+        ][0]
+        table_opt_block = pserver_program._create_block(pre_block_idx)
 
         if self.sync_mode:
             # create grad vars in pserver program
@@ -1254,6 +1248,8 @@ to transpile() call.")
             grad_var = pserver_program.global_block()._rename_var(
                 origin_grad_name, splited_grad_name)
+        lr_var = pserver_program.global_block().vars[table_opt_op.input(
+            "LearningRate")[0]]
 
         inputs = {
             "Param": [param_var],
             "Grad": [grad_var],
......
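Reviewer note: the last two hunks mostly move the lookup-table bookkeeping around; the core step, present on both sides, is picking the optimize op whose "Param" input is the distributed table. Restated as a standalone sketch (plain dicts stand in for framework op objects, so this runs on its own):

def find_table_opt_op(optimize_ops, table_name):
    # Mirror of the list comprehension in the hunk above: first optimize op
    # whose "Param" input names the lookup-table parameter.
    return [
        op for op in optimize_ops
        if "Param" in op["input_names"] and op["inputs"]["Param"][0] == table_name
    ][0]

ops = [
    {"input_names": ["Param", "Grad"], "inputs": {"Param": ["fc_0.w_0"]}},
    {"input_names": ["Param", "Grad"], "inputs": {"Param": ["__emb__"]}},
]
assert find_table_opt_op(ops, "__emb__") is ops[1]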