Commit b213bb1a authored by: F frankwhzhang

modify dpsgd_strategy optimizer

Parent 08672bda
@@ -110,10 +110,44 @@ class DPSGDStrategy(FLStrategyBase):
    def __init__(self):
        super(DPSGDStrategy, self).__init__()

    @property
    def learning_rate(self):
        return self._learning_rate

    @learning_rate.setter
    def learning_rate(self, s):
        self._learning_rate = s

    @property
    def clip(self):
        return self._clip

    @clip.setter
    def clip(self, s):
        self._clip = s

    @property
    def batch_size(self):
        return self._batch_size

    @batch_size.setter
    def batch_size(self, s):
        self._batch_size = s

    @property
    def sigma(self):
        return self._sigma

    @sigma.setter
    def sigma(self, s):
        self._sigma = s
    def minimize(self, optimizer=None, losses=[]):
        """
        Build the Dpsgd optimizer from the strategy's hyperparameters
        and minimize the given loss in DPSGDStrategy.
        """
        optimizer = fluid.optimizer.Dpsgd(self._learning_rate, clip=self._clip, batch_size=self._batch_size, sigma=self._sigma)
        print(str(optimizer))
        optimizer.minimize(losses[0])

    def _build_trainer_program_for_job(
......
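With this change, a trainer sets the four DP-SGD hyperparameters on the strategy and lets the strategy construct the Dpsgd optimizer itself inside minimize. A minimal standalone sketch of that flow is below; the import path, the toy regression loss, and the concrete sigma value are assumptions for illustration, not part of this commit (in practice the strategy comes from FLStrategyFactory, as in the example hunk further down).

```python
import paddle.fluid as fluid
# Assumed module path for illustration only.
from paddle_fl.core.strategy.fl_strategy_base import DPSGDStrategy

# A toy regression network, just to have a loss tensor to minimize.
x = fluid.layers.data(name="x", shape=[13], dtype="float32")
y = fluid.layers.data(name="y", shape=[1], dtype="float32")
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))

strategy = DPSGDStrategy()
strategy.learning_rate = 0.1
strategy.clip = 4.0               # per-example gradient clipping bound (CLIP)
strategy.batch_size = 64.0        # passed to Dpsgd as a float
strategy.sigma = 4.0 * 1e-3       # assumed value; the example sets sigma = CLIP * SIGMA
strategy.minimize(losses=[loss])  # builds fluid.optimizer.Dpsgd and minimizes the loss
```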
@@ -28,8 +28,7 @@ CLIP = 4.0
batch_size = 64
job_generator = JobGenerator()
optimizer = fluid.optimizer.Dpsgd(0.1, clip=CLIP, batch_size=float(batch_size), sigma=CLIP * SIGMA)
# optimizer = fluid.optimizer.SGD(learning_rate=0.1)
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
job_generator.set_optimizer(optimizer)
job_generator.set_losses([model.loss])
job_generator.set_startup_program(model.startup_program)
@@ -40,6 +39,10 @@ build_strategy = FLStrategyFactory()
build_strategy.dpsgd = True
build_strategy.inner_step = 1
strategy = build_strategy.create_fl_strategy()
strategy.learning_rate = 0.1
strategy.clip = CLIP
strategy.batch_size = float(batch_size)
strategy.sigma = CLIP * SIGMA
# endpoints will be collected through the cluster
# in this example, we suppose endpoints have been collected
......
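For context on what these four values feed into: the standard DP-SGD recipe clips each per-example gradient to an L2 bound, adds Gaussian noise scaled by sigma, and averages over the batch before taking an SGD step. The numpy sketch below is only a conceptual illustration of that recipe under the hyperparameters above (the SIGMA value is assumed, since its definition is outside this hunk); it is not the actual kernel behind fluid.optimizer.Dpsgd.

```python
import numpy as np

CLIP = 4.0        # L2 clipping bound, as in the example above
SIGMA = 0.001     # assumed noise multiplier; the example sets sigma = CLIP * SIGMA
batch_size = 64
lr = 0.1

def dpsgd_step(params, per_example_grads):
    """One conceptual DP-SGD update: clip each per-example gradient to CLIP,
    sum, add Gaussian noise with std CLIP * SIGMA, average, then SGD step."""
    clipped = [g * min(1.0, CLIP / (np.linalg.norm(g) + 1e-12))
               for g in per_example_grads]
    noise = np.random.normal(0.0, CLIP * SIGMA, size=params.shape)
    noisy_mean_grad = (np.sum(clipped, axis=0) + noise) / batch_size
    return params - lr * noisy_mean_grad

# Toy usage: 64 random per-example gradients for a 10-dimensional parameter vector.
params = np.zeros(10)
grads = np.random.randn(batch_size, 10)
params = dpsgd_step(params, grads)
```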