Commit a75f8359 authored by wuzewu

add L2SP strategy

Parent ff5dd783
@@ -22,6 +22,7 @@ import multiprocessing
 import paddle.fluid as fluid
 from paddlehub.finetune.optimization import adam_weight_decay_optimization
+from paddlehub.finetune.regularizer import L2SPDecayRegularizer
 
 def get_pretrained_parameter(main_program, start_program):
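The newly imported L2SPDecayRegularizer lives in the paddlehub.finetune.regularizer module, which is not part of this excerpt. As a rough sketch of the idea behind it (an assumption about L2-SP in general, not the actual PaddleHub implementation): instead of decaying weights toward zero, the penalty pulls fine-tuned weights back toward the pretrained values they started from.

import numpy as np

def l2sp_penalty(w, w_pretrained, coeff=1e-3):
    """Illustrative L2-SP penalty: coeff * ||w - w_pretrained||^2.

    Plain L2 decay would compute coeff * ||w||^2 instead, dragging
    fine-tuned weights toward zero rather than toward the starting point.
    """
    w = np.asarray(w, dtype=np.float64)
    w0 = np.asarray(w_pretrained, dtype=np.float64)
    return coeff * np.sum((w - w0) ** 2)

# A weight that drifted from its pretrained value is penalized;
# an unchanged weight contributes nothing.
print(l2sp_penalty([1.2, -0.5], [1.0, -0.5]))  # 1e-3 * 0.04 = 4e-05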
@@ -42,8 +43,6 @@ class DefaultStrategy(object):
     def __init__(self, learning_rate=1e-4, optimizer_name="adam"):
         self.learning_rate = learning_rate
         self._optimizer_name = optimizer_name
-
-    def execute(self, loss):
         if self._optimizer_name.lower() == "sgd":
             self.optimizer = fluid.optimizer.SGD(
                 learning_rate=self.learning_rate)
@@ -75,6 +74,7 @@ class DefaultStrategy(object):
             self.optimizer = fluid.optimizer.Adam(
                 learning_rate=self.learning_rate)
 
+    def execute(self, loss):
         if self.optimizer is not None:
             self.optimizer.minimize(loss)
         else:
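The two hunks above move the optimizer construction out of DefaultStrategy.execute() and into __init__(), leaving execute() responsible only for calling minimize(). Under that reading, a custom strategy now only needs to override execute(); the class below is a hypothetical illustration (its name and the module path are assumptions, not part of this commit) mirroring the pattern the finetune strategies in this diff follow.

from paddlehub.finetune.strategy import DefaultStrategy  # module path assumed

class MyFinetuneStrategy(DefaultStrategy):
    # Hypothetical subclass: the optimizer is already built by
    # DefaultStrategy.__init__, so execute() only customizes what
    # happens right before minimize().
    def execute(self, loss):
        # e.g. attach per-parameter regularizers here, as the
        # finetune strategies in this commit do
        if self.optimizer is not None:
            self.optimizer.minimize(loss)
        else:
            raise ValueError("MyFinetuneStrategy's optimizer is None")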
@@ -153,37 +153,35 @@ class DefaultFinetuneStrategy(DefaultStrategy):
         self.regularization_coeff = regularization_coeff
 
     def execute(self, loss):
-        if self._optimizer_name.lower() == "sgd":
-            self.optimizer = fluid.optimizer.SGD(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "adagrad":
-            self.optimizer = fluid.optimizer.Adagrad(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "adamax":
-            self.optimizer = fluid.optimizer.Adamax(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "decayedadagrad":
-            self.optimizer = fluid.optimizer.DecayedAdagrad(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "ftrl":
-            self.optimizer = fluid.optimizer.Ftrl(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "larsmomentum":
-            self.optimizer = fluid.optimizer.LarsMomentum(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "momentum":
-            self.optimizer = fluid.optimizer.Momentum(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "decayedadagrad":
-            self.optimizer = fluid.optimizer.DecayedAdagrad(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "rmsprop":
-            self.optimizer = fluid.optimizer.RMSPropOptimizer(
-                learning_rate=self.learning_rate)
-        else:
-            self.optimizer = fluid.optimizer.Adam(
-                learning_rate=self.learning_rate)
+        # get pretrained parameters
+        program = loss.block.program
+        global_block = program.global_block()
+        pretrained_params = get_pretrained_parameter(
+            program, fluid.default_startup_program())
+
+        # set parameter attrs
+        for index, param in enumerate(pretrained_params):
+            param.regularizer = fluid.regularizer.L2Decay(
+                regularization_coeff=self.regularization_coeff)
+
+        if self.optimizer is not None:
+            self.optimizer.minimize(loss)
+        else:
+            raise ValueError("DefaultFinetuneStrategy's optimizer is None")
+
+
+class L2SPFinetuneStrategy(DefaultStrategy):
+    def __init__(self,
+                 learning_rate=1e-4,
+                 optimizer_name="adam",
+                 regularization_coeff=1e-3):
+        super(L2SPFinetuneStrategy, self).__init__(
+            learning_rate=learning_rate, optimizer_name=optimizer_name)
+        self.learning_rate = learning_rate
+        self._optimizer_name = optimizer_name
+        self.regularization_coeff = regularization_coeff
+
+    def execute(self, loss):
         # get pretrained parameters
         program = loss.block.program
         global_block = program.global_block()
@@ -192,7 +190,7 @@ class DefaultFinetuneStrategy(DefaultStrategy):
 
         # set parameter attrs
         for index, param in enumerate(pretrained_params):
-            param.regularizer = fluid.regularizer.L2Decay(
+            param.regularizer = L2SPDecayRegularizer(
                 regularization_coeff=self.regularization_coeff)
 
         if self.optimizer is not None:
...
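For reference, a hedged usage sketch of the new strategy class: the constructor arguments are taken from the hunk above, while the import path and the way the strategy is handed to a PaddleHub finetune run are assumptions that may differ in this version of the library.

from paddlehub.finetune.strategy import L2SPFinetuneStrategy  # module path assumed

strategy = L2SPFinetuneStrategy(
    learning_rate=1e-4,          # base learning rate for the Adam optimizer
    optimizer_name="adam",       # resolved in DefaultStrategy.__init__ after this commit
    regularization_coeff=1e-3)   # weight of the L2-SP pull toward pretrained values

# strategy.execute(loss) tags each pretrained parameter with
# L2SPDecayRegularizer and then calls optimizer.minimize(loss).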
@@ -12,5 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ PaddleHub version string """
-hub_version = "0.4.6.beta"
+hub_version = "0.4.8.beta"
 module_proto_version = "1.0.0"