Commit a75f8359 authored by W wuzewu

add L2SP strategy

Parent ff5dd783
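This commit introduces an L2SPFinetuneStrategy that regularizes fine-tuned pretrained parameters toward their starting-point values (L2-SP) instead of toward zero. The following is a minimal usage sketch, not part of the commit: it assumes the class lives in paddlehub.finetune.strategy (the file changed in the diff below) and builds a toy paddle.fluid network purely for illustration.

import paddle.fluid as fluid
from paddlehub.finetune.strategy import L2SPFinetuneStrategy  # assumed module path

# toy network standing in for a real PaddleHub fine-tuning task
img = fluid.layers.data(name="img", shape=[784], dtype="float32")
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
probs = fluid.layers.fc(input=img, size=10, act="softmax")
loss = fluid.layers.mean(fluid.layers.cross_entropy(input=probs, label=label))

strategy = L2SPFinetuneStrategy(
    learning_rate=1e-4, optimizer_name="adam", regularization_coeff=1e-3)
# attaches L2SPDecayRegularizer to the pretrained parameters found in the
# program, then calls optimizer.minimize(loss)
strategy.execute(loss)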
......@@ -22,6 +22,7 @@ import multiprocessing
import paddle.fluid as fluid
from paddlehub.finetune.optimization import adam_weight_decay_optimization
from paddlehub.finetune.regularizer import L2SPDecayRegularizer
def get_pretrained_parameter(main_program, start_program):
......@@ -42,8 +43,6 @@ class DefaultStrategy(object):
    def __init__(self, learning_rate=1e-4, optimizer_name="adam"):
        self.learning_rate = learning_rate
        self._optimizer_name = optimizer_name

    def execute(self, loss):
        if self._optimizer_name.lower() == "sgd":
            self.optimizer = fluid.optimizer.SGD(
                learning_rate=self.learning_rate)
......@@ -75,6 +74,7 @@ class DefaultStrategy(object):
            self.optimizer = fluid.optimizer.Adam(
                learning_rate=self.learning_rate)

    def execute(self, loss):
        if self.optimizer is not None:
            self.optimizer.minimize(loss)
        else:
......@@ -153,37 +153,35 @@ class DefaultFinetuneStrategy(DefaultStrategy):
        self.regularization_coeff = regularization_coeff

    def execute(self, loss):
        if self._optimizer_name.lower() == "sgd":
            self.optimizer = fluid.optimizer.SGD(
                learning_rate=self.learning_rate)
        elif self._optimizer_name.lower() == "adagrad":
            self.optimizer = fluid.optimizer.Adagrad(
                learning_rate=self.learning_rate)
        elif self._optimizer_name.lower() == "adamax":
            self.optimizer = fluid.optimizer.Adamax(
                learning_rate=self.learning_rate)
        elif self._optimizer_name.lower() == "decayedadagrad":
            self.optimizer = fluid.optimizer.DecayedAdagrad(
                learning_rate=self.learning_rate)
        elif self._optimizer_name.lower() == "ftrl":
            self.optimizer = fluid.optimizer.Ftrl(
                learning_rate=self.learning_rate)
        elif self._optimizer_name.lower() == "larsmomentum":
            self.optimizer = fluid.optimizer.LarsMomentum(
                learning_rate=self.learning_rate)
        elif self._optimizer_name.lower() == "momentum":
            self.optimizer = fluid.optimizer.Momentum(
                learning_rate=self.learning_rate)
        elif self._optimizer_name.lower() == "decayedadagrad":
            self.optimizer = fluid.optimizer.DecayedAdagrad(
                learning_rate=self.learning_rate)
        elif self._optimizer_name.lower() == "rmsprop":
            self.optimizer = fluid.optimizer.RMSPropOptimizer(
                learning_rate=self.learning_rate)

        # get pretrained parameters
        program = loss.block.program
        global_block = program.global_block()
        pretrained_params = get_pretrained_parameter(
            program, fluid.default_startup_program())

        # set parameter attrs
        for index, param in enumerate(pretrained_params):
            param.regularizer = fluid.regularizer.L2Decay(
                regularization_coeff=self.regularization_coeff)

        if self.optimizer is not None:
            self.optimizer.minimize(loss)
        else:
            self.optimizer = fluid.optimizer.Adam(
                learning_rate=self.learning_rate)
            raise ValueError("DefaultFinetuneStrategy's optimizer is None")


class L2SPFinetuneStrategy(DefaultStrategy):
    def __init__(self,
                 learning_rate=1e-4,
                 optimizer_name="adam",
                 regularization_coeff=1e-3):
        super(L2SPFinetuneStrategy, self).__init__(
            learning_rate=learning_rate, optimizer_name=optimizer_name)
        self.learning_rate = learning_rate
        self._optimizer_name = optimizer_name
        self.regularization_coeff = regularization_coeff

    def execute(self, loss):
        # get pretrained parameters
        program = loss.block.program
        global_block = program.global_block()
......@@ -192,7 +190,7 @@ class DefaultFinetuneStrategy(DefaultStrategy):
        # set parameter attrs
        for index, param in enumerate(pretrained_params):
            param.regularizer = fluid.regularizer.L2Decay(
            param.regularizer = L2SPDecayRegularizer(
                regularization_coeff=self.regularization_coeff)
        if self.optimizer is not None:
......
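The L2SPDecayRegularizer imported at the top of this diff follows the L2-SP idea (Li et al., 2018): parameters initialized from a pretrained model are decayed toward their starting-point values rather than toward zero. A conceptual numpy sketch of the two penalties is shown below; it is not the actual regularizer implementation, and the exact scaling factor may differ.

import numpy as np

def l2_penalty(w, coeff=1e-3):
    # plain weight decay: pulls every weight toward 0
    return 0.5 * coeff * np.sum(w ** 2)

def l2sp_penalty(w, w0, coeff=1e-3):
    # L2-SP: pulls a pretrained weight toward its starting-point value w0
    return 0.5 * coeff * np.sum((w - w0) ** 2)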
......@@ -12,5 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" PaddleHub version string """
hub_version = "0.4.6.beta"
hub_version = "0.4.8.beta"
module_proto_version = "1.0.0"