From 1492712e89578a7c7a2a9de58e3b251301fd2010 Mon Sep 17 00:00:00 2001 From: minghaoBD <79566150+minghaoBD@users.noreply.github.com> Date: Fri, 4 Jun 2021 10:09:49 +0800 Subject: [PATCH] prune the optimizer in dygraph mode (#784) * prune the optimizer in dygraph mode * set opt when initializing pruner * tests passed locally * update Chinese docs and tutorials * refine docs style * avoid ambiguation in docs --- .../dygraph/pruners/fpgm_filter_pruner.rst | 36 ++++++-- .../dygraph/pruners/l1norm_filter_pruner.rst | 44 +++++++--- .../dygraph/pruners/l2norm_filter_pruner.rst | 40 +++++++-- .../dygraph/dygraph_pruning_tutorial.md | 13 +-- .../pruning/dygraph/filter_pruning.md | 13 +-- .../dygraph/self_defined_filter_pruning.md | 17 ++-- paddleslim/dygraph/prune/filter_pruner.py | 13 ++- paddleslim/dygraph/prune/fpgm_pruner.py | 5 +- paddleslim/dygraph/prune/l1norm_pruner.py | 4 +- paddleslim/dygraph/prune/l2norm_pruner.py | 4 +- paddleslim/dygraph/prune/pruner.py | 7 +- paddleslim/dygraph/prune/pruning_plan.py | 83 +++++++++++++++++-- tests/dygraph/test_filter_pruner.py | 12 ++- 13 files changed, 211 insertions(+), 80 deletions(-) diff --git a/docs/zh_cn/api_cn/dygraph/pruners/fpgm_filter_pruner.rst b/docs/zh_cn/api_cn/dygraph/pruners/fpgm_filter_pruner.rst index 4759b16e..4776ce59 100644 --- a/docs/zh_cn/api_cn/dygraph/pruners/fpgm_filter_pruner.rst +++ b/docs/zh_cn/api_cn/dygraph/pruners/fpgm_filter_pruner.rst @@ -1,11 +1,11 @@ FPGMFilterPruner ================== -.. py:class:: paddleslim.FPGMFilterPruner(model, inputs, sen_file=None) +.. 
py:class:: paddleslim.FPGMFilterPruner(model, inputs, sen_file=None, opt=None) `源代码 `_ -用于剪裁卷积层输出通道的的剪裁器。该剪裁器按论文 `Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration _` 中的统计方法对单个卷积层内的 ``Filters`` 的重要性进行排序,并按指定比例剪裁掉相对不重要的 ``Filters`` 。对 ``Filters`` 的剪裁等价于剪裁卷积层的输出通道数。 +用于剪裁卷积层输出通道的剪裁器。该剪裁器按论文 `Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration `_ 中的统计方法对单个卷积层内的 ``Filters`` 的重要性进行排序,并按指定比例剪裁掉相对不重要的 ``Filters`` 。对 ``Filters`` 的剪裁等价于剪裁卷积层的输出通道数。 **参数:** @@ -15,17 +15,40 @@ FPGMFilterPruner - **sen_file(str)** - 存储敏感度信息的文件,需要指定为绝对路径。在调用当前剪裁器的 ``sensitive`` 方法时,敏感度信息会以增量的形式追加到文件 ``sen_file`` 中。如果用户不需要敏感度剪裁策略,可以将该选项设置为 ``None`` 。默认为None。 +- **opt(paddle.optimizer.Optimizer)** - 动态图模型训练时用到的优化器。传入该参数是为了解决上述 ``model(paddle.nn.Layer)`` 不含有优化器,导致不能剪裁到优化器参数(例如 ``Momentum`` 中的 ``velocity`` )的问题。是否传入 ``optimizer`` 参数的逻辑为:若已经初始化了 ``optimizer`` 对象,则传入;否则,在调用 ``pruner.prune_vars()`` 之后初始化 ``optimizer`` 。默认为None。 + **返回:** 一个剪裁器实例。 -**示例代码:** +**示例代码1:** .. code-block:: python + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import FPGMFilterPruner - net = mobilenet_v1(pretrained=False) + net = mobilenet_v1(pretrained=False) pruner = FPGMFilterPruner(net, [1, 3, 224, 224]) + pruner.prune_var("conv2d_26.w_0", [0], pruned_ratio=0.5) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, + parameters=net.parameters()) .. + +**示例代码2:** + +.. code-block:: python + + import paddle + from paddle.vision.models import mobilenet_v1 + from paddleslim import FPGMFilterPruner + net = mobilenet_v1(pretrained=False) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, + parameters=net.parameters()) + pruner = FPGMFilterPruner(net, [1, 3, 224, 224], opt=optimizer) +.. 
+ +**注意:** 上述两段代码展示了如何在 ``pruner`` 中是否调用 ``optimizer`` ,在示例代码1中,初始化 ``optimizer`` 时传入的 ``parameters`` 为剪裁后的 ``net.parameters()`` ,故无需在初始化 ``pruner`` 时传入 ``optimizer`` ;反之在示例代码2中, ``optimizer`` 中的 ``parameter`` 为剪裁前,故需要传入给 ``pruner`` 一并剪裁 ``optimizer`` 中的相关参数。 .. py:method:: prune_var(var_name, pruned_dims, pruned_ratio, apply="impretive") @@ -124,6 +147,7 @@ FPGMFilterPruner 0.2: 0.4 } } + .. 其中,``weight_0`` 是卷积层权重变量的名称, ``sensitivities['weight_0']`` 是一个字典, key是用 ``float`` 类型数值表示的剪裁率,value是对应剪裁率下整个模型的精度损失比例。 @@ -169,7 +193,7 @@ FPGMFilterPruner pruner = FPGMFilterPruner(net, [1, 3, 224, 224]) sen = pruner.sensitive(eval_func=eval_fn, sen_file="./sen.pickle") print(f"sen: {sen}") - + .. .. py:method:: sensitive_prune(pruned_flops, skip_vars=[], align=None) @@ -231,6 +255,6 @@ FPGMFilterPruner sen = pruner.sensitive(eval_func=eval_fn, sen_file="./sen.pickle") plan = pruner.sensitive_prune(0.5, align=8) print(f"plan: {plan}") - + .. diff --git a/docs/zh_cn/api_cn/dygraph/pruners/l1norm_filter_pruner.rst b/docs/zh_cn/api_cn/dygraph/pruners/l1norm_filter_pruner.rst index e848fd1d..b03122c8 100644 --- a/docs/zh_cn/api_cn/dygraph/pruners/l1norm_filter_pruner.rst +++ b/docs/zh_cn/api_cn/dygraph/pruners/l1norm_filter_pruner.rst @@ -1,7 +1,7 @@ L1NormFilterPruner ================== -.. py:class:: paddleslim.L1NormFilterPruner(model, inputs, sen_file=None) +.. py:class:: paddleslim.L1NormFilterPruner(model, inputs, sen_file=None, opt=None) `源代码 `_ @@ -15,16 +15,40 @@ L1NormFilterPruner - **sen_file(str)** - 存储敏感度信息的文件,需要指定为绝对路径。在调用当前剪裁器的 ``sensitive`` 方法时,敏感度信息会以增量的形式追加到文件 ``sen_file`` 中。如果用户不需要敏感度剪裁策略,可以将该选项设置为 ``None`` 。默认为None。 +- **opt(paddle.optimizer.Optimizer)** - 动态图模型训练时用到的优化器。传入该参数是为了解决上述 ``model(paddle.nn.Layer)`` 不含有优化器,导致不能剪裁到优化器参数(例如 ``Momentum`` 中的 ``velocity`` )的问题。是否传入 ``optimizer`` 参数的逻辑为:若已经初始化了 ``optimizer`` 对象,则传入;否则,在调用了 ``pruner.prune_vars()`` 之后初始化 ``optimizer`` 。默认为None。 + **返回:** 一个剪裁器实例。 -**示例代码:** +**示例代码1:** .. 
code-block:: python + + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L1NormFilterPruner net = mobilenet_v1(pretrained=False) pruner = L1NormFilterPruner(net, [1, 3, 224, 224]) + pruner.prune_var("conv2d_26.w_0", [0], pruned_ratio=0.5) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, + parameters=net.parameters()) +.. + +**示例代码2:** + +.. code-block:: python + + import paddle + from paddle.vision.models import mobilenet_v1 + from paddleslim import L1NormFilterPruner + net = mobilenet_v1(pretrained=False) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, + parameters=net.parameters()) + pruner = L1NormFilterPruner(net, [1, 3, 224, 224], opt=optimizer) .. + +**注意:** 上述两段代码展示了如何在 ``pruner`` 中是否调用 ``optimizer`` ,在示例代码1中,初始化 ``optimizer`` 时传入的 ``parameters`` 为剪裁后的 ``net.parameters()`` ,故无需在初始化 ``pruner`` 时传入 ``optimizer`` ;反之在示例代码2中, ``optimizer`` 中的 ``parameter`` 为剪裁前,故需要传入给 ``pruner`` 一并剪裁 ``optimizer`` 中的相关参数。 .. py:method:: prune_var(var_name, pruned_dims, pruned_ratio, apply="impretive") @@ -49,6 +73,7 @@ L1NormFilterPruner 点击 `AIStudio <>`_ 执行以下示例代码。 .. code-block:: python + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L1NormFilterPruner @@ -57,8 +82,7 @@ L1NormFilterPruner plan = pruner.prun_var("conv2d_26.w_0", [0]) print(f"plan: {plan}") paddle.summary(net, (1, 3, 224, 224)) - - .. + .. .. py:method:: prune_vars(ratios, axis, apply="impretive") @@ -81,6 +105,7 @@ L1NormFilterPruner 点击 `AIStudio <>`_ 执行以下示例代码。 .. code-block:: python + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L1NormFilterPruner @@ -89,7 +114,6 @@ L1NormFilterPruner plan = pruner.prun_vars({"conv2d_26.w_0": 0.5}, [0]) print(f"plan: {plan}") paddle.summary(net, (1, 3, 224, 224)) - .. .. py:method:: sensitive(eval_func=None, sen_file=None, target_vars=None, skip_vars=[]) @@ -121,7 +145,7 @@ L1NormFilterPruner 0.2: 0.4 } } - + .. 
其中,``weight_0`` 是卷积层权重变量的名称, ``sensitivities['weight_0']`` 是一个字典, key是用 ``float`` 类型数值表示的剪裁率,value是对应剪裁率下整个模型的精度损失比例。 **示例:** @@ -129,6 +153,7 @@ L1NormFilterPruner 点击 `AIStudio <>`_ 执行以下示例代码。 .. code-block:: python + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L1NormFilterPruner @@ -165,7 +190,7 @@ L1NormFilterPruner pruner = L1NormFilterPruner(net, [1, 3, 224, 224]) sen = pruner.sensitive(eval_func=eval_fn, sen_file="./sen.pickle") print(f"sen: {sen}") - + .. .. py:method:: sensitive_prune(pruned_flops, skip_vars=[], align=None) @@ -189,6 +214,7 @@ L1NormFilterPruner 点击 `AIStudio <>`_ 执行以下示例代码。 .. code-block:: python + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L1NormFilterPruner @@ -226,6 +252,4 @@ L1NormFilterPruner sen = pruner.sensitive(eval_func=eval_fn, sen_file="./sen.pickle") plan = pruner.sensitive_prune(0.5, align=8) print(f"plan: {plan}") - - - + .. diff --git a/docs/zh_cn/api_cn/dygraph/pruners/l2norm_filter_pruner.rst b/docs/zh_cn/api_cn/dygraph/pruners/l2norm_filter_pruner.rst index d5527a40..adb2b95c 100644 --- a/docs/zh_cn/api_cn/dygraph/pruners/l2norm_filter_pruner.rst +++ b/docs/zh_cn/api_cn/dygraph/pruners/l2norm_filter_pruner.rst @@ -1,7 +1,7 @@ L2NormFilterPruner ================== -.. py:class:: paddleslim.L2NormFilterPruner(model, inputs, sen_file=None) +.. py:class:: paddleslim.L2NormFilterPruner(model, inputs, sen_file=None, opt=None) `源代码 `_ @@ -15,16 +15,41 @@ L2NormFilterPruner - **sen_file(str)** - 存储敏感度信息的文件,需要指定为绝对路径。在调用当前剪裁器的 ``sensitive`` 方法时,敏感度信息会以增量的形式追加到文件 ``sen_file`` 中。如果用户不需要敏感度剪裁策略,可以将该选项设置为 ``None`` 。默认为None。 +- **opt(paddle.optimizer.Optimizer)** - 动态图模型训练时用到的优化器。传入该参数是为了解决上述 ``model(paddle.nn.Layer)`` 不含有优化器,导致不能剪裁到优化器参数(例如 ``Momentum`` 中的 ``velocity`` )的问题。是否传入 ``optimizer`` 参数的逻辑为:若已经初始化了 ``optimizer`` 对象,则传入;否则,在调用pruner.prune_vars()之后初始化 ``optimizer`` 。默认为None。 + **返回:** 一个剪裁器实例。 -**示例代码:** +**示例代码1:** .. 
code-block:: python + + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L2NormFilterPruner net = mobilenet_v1(pretrained=False) pruner = L2NormFilterPruner(net, [1, 3, 224, 224]) + pruner.prune_var("conv2d_26.w_0", [0], pruned_ratio=0.5) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, + parameters=net.parameters()) .. + +**示例代码2:** + +.. code-block:: python + + import paddle + from paddle.vision.models import mobilenet_v1 + from paddleslim import L2NormFilterPruner + net = mobilenet_v1(pretrained=False) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, + parameters=net.parameters()) + pruner = L2NormFilterPruner(net, [1, 3, 224, 224], opt=optimizer) +.. + +**注意:** 上述两段代码展示了如何在 ``pruner`` 中是否调用 ``optimizer`` ,在示例代码1中,初始化 ``optimizer`` 时传入的 ``parameters`` 为剪裁后的 ``net.parameters()`` ,故无需在初始化 ``pruner`` 时传入 ``optimizer`` ;反之在示例代码2中, ``optimizer`` 中的 ``parameter`` 为剪裁前,故需要传入给 ``pruner`` 一并剪裁 ``optimizer`` 中的相关参数。 + .. py:method:: prune_var(var_name, pruned_dims, pruned_ratio, apply="impretive") @@ -49,6 +74,7 @@ L2NormFilterPruner 点击 `AIStudio <>`_ 执行以下示例代码。 .. code-block:: python + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L2NormFilterPruner @@ -81,6 +107,7 @@ L2NormFilterPruner 点击 `AIStudio <>`_ 执行以下示例代码。 .. code-block:: python + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L2NormFilterPruner @@ -121,6 +148,8 @@ L2NormFilterPruner 0.2: 0.4 } } + + .. 其中,``weight_0`` 是卷积层权重变量的名称, ``sensitivities['weight_0']`` 是一个字典, key是用 ``float`` 类型数值表示的剪裁率,value是对应剪裁率下整个模型的精度损失比例。 @@ -129,6 +158,7 @@ L2NormFilterPruner 点击 `AIStudio <>`_ 执行以下示例代码。 .. 
code-block:: python + import paddle from paddle.vision.models import mobilenet_v1 from paddleslim import L2NormFilterPruner @@ -165,7 +195,7 @@ L2NormFilterPruner pruner = L2NormFilterPruner(net, [1, 3, 224, 224]) sen = pruner.sensitive(eval_func=eval_fn, sen_file="./sen.pickle") print(f"sen: {sen}") - + .. .. py:method:: sensitive_prune(pruned_flops, skip_vars=[], align=None) @@ -226,6 +256,4 @@ L2NormFilterPruner sen = pruner.sensitive(eval_func=eval_fn, sen_file="./sen.pickle") plan = pruner.sensitive_prune(0.5, align=8) print(f"plan: {plan}") - - - + .. diff --git a/docs/zh_cn/quick_start/dygraph/dygraph_pruning_tutorial.md b/docs/zh_cn/quick_start/dygraph/dygraph_pruning_tutorial.md index 3b50e379..b93a8121 100644 --- a/docs/zh_cn/quick_start/dygraph/dygraph_pruning_tutorial.md +++ b/docs/zh_cn/quick_start/dygraph/dygraph_pruning_tutorial.md @@ -74,11 +74,12 @@ FLOPs = paddle.flops(net, input_size=[1, 3, 32, 32], print_detail=True) 代码如下所示: ```python -pruner = L1NormFilterPruner(net, [1, 3, 32, 32]) +pruner = L1NormFilterPruner(net, [1, 3, 32, 32], opt=optimizer) pruner.prune_vars({'conv2d_22.w_0':0.5, 'conv2d_20.w_0':0.6}, axis=0) ``` 以上操作会按照网络结构中不同网路层的冗余程度对网络层进行不同程度的裁剪并修改网络模型结构。 +**注意:** 需要将`optimizer`传入`pruner`中,这是为了保证`optimizer`中的参数可以被剪裁到。例如:`momentum`中的`velocity`。但是如果在`pruner`后定义`optimizer`,则无需传入了,因为初始化`optimizer`时会指定`parameters=net.parameters()`。 ### 4.3 计算剪裁之后的FLOPs @@ -102,16 +103,6 @@ model.evaluate(val_dataset, batch_size=128, verbose=1) 以下代码对裁剪过后的模型进行评估后执行了一个`epoch`的微调,再对微调过后的模型重新进行评估: ```python - -optimizer = paddle.optimizer.Momentum( - learning_rate=0.1, - parameters=net.parameters()) - -model.prepare( - optimizer, - paddle.nn.CrossEntropyLoss(), - paddle.metric.Accuracy(topk=(1, 5))) - model.fit(train_dataset, epochs=1, batch_size=128, verbose=1) model.evaluate(val_dataset, batch_size=128, verbose=1) ``` diff --git a/docs/zh_cn/tutorials/pruning/dygraph/filter_pruning.md b/docs/zh_cn/tutorials/pruning/dygraph/filter_pruning.md index 
624d68d2..877de28a 100644 --- a/docs/zh_cn/tutorials/pruning/dygraph/filter_pruning.md +++ b/docs/zh_cn/tutorials/pruning/dygraph/filter_pruning.md @@ -79,13 +79,15 @@ PaddleSlim提供了工具类`Pruner`来进行重要性分析和剪裁操作, ```python from paddleslim.dygraph import L1NormFilterPruner -pruner = L1NormFilterPruner(net, [1, 3, 224, 224]) +pruner = L1NormFilterPruner(net, [1, 3, 224, 224], opt=optimizer) ``` +**注意:** 需要将`optimizer`传入`pruner`中,这是为了保证`optimizer`中的参数可以被剪裁到。例如:`momentum`中的`velocity`。但是如果在`pruner`后定义`optimizer`,则无需传入了,因为初始化`optimizer`时会指定`parameters=net.parameters()`。 + 如果本地文件系统已有一个存储敏感度信息(见4.1节)的文件,声明`L1NormFilterPruner`对象时,可以通过指定`sen_file`选项加载计算好的敏感度信息,如下: ```python -#pruner = L1NormFilterPruner(net, [1, 3, 224, 224]), sen_file="./sen.pickle") +#pruner = L1NormFilterPruner(net, [1, 3, 224, 224]), sen_file="./sen.pickle", opt=optimizer) ``` ### 4.1 卷积重要性分析 @@ -167,13 +169,6 @@ print(f"before fine-tuning: {result}") 对剪裁后的模型重新训练, 并再测试集上测试精度,如下: ```python -optimizer = paddle.optimizer.Momentum( - learning_rate=0.1, - parameters=net.parameters()) -model.prepare( - optimizer, - paddle.nn.CrossEntropyLoss(), - paddle.metric.Accuracy(topk=(1, 5))) model.fit(train_dataset, epochs=2, batch_size=128, verbose=1) result = model.evaluate(val_dataset,batch_size=128, log_freq=10) print(f"after fine-tuning: {result}") diff --git a/docs/zh_cn/tutorials/pruning/dygraph/self_defined_filter_pruning.md b/docs/zh_cn/tutorials/pruning/dygraph/self_defined_filter_pruning.md index d52853e9..1fdf522b 100644 --- a/docs/zh_cn/tutorials/pruning/dygraph/self_defined_filter_pruning.md +++ b/docs/zh_cn/tutorials/pruning/dygraph/self_defined_filter_pruning.md @@ -70,9 +70,9 @@ from paddleslim.dygraph import FilterPruner class L2NormFilterPruner(FilterPruner): - def __init__(self, model, input_shape, sen_file=None): + def __init__(self, model, input_shape, sen_file=None, opt=None): super(L2NormFilterPruner, self).__init__( - model, input_shape, sen_file=sen_file) + model, input_shape, sen_file=sen_file, 
opt=opt) def cal_mask(self, var_name, pruned_ratio, group): value = group[var_name]['value'] @@ -148,9 +148,9 @@ from paddleslim.dygraph import FilterPruner class FPGMFilterPruner(FilterPruner): - def __init__(self, model, input_shape, sen_file=None): + def __init__(self, model, input_shape, sen_file=None, opt=None): super(FPGMFilterPruner, self).__init__( - model, input_shape, sen_file=sen_file) + model, input_shape, sen_file=sen_file, opt=opt) def cal_mask(self, var_name, pruned_ratio, group): value = group[var_name]['value'] @@ -223,7 +223,7 @@ print(result) ### 5.2 计算敏感度 ```python -pruner = FPGMFilterPruner(net, [1, 3, 32, 32]) +pruner = FPGMFilterPruner(net, [1, 3, 32, 32], opt=optimizer) def eval_fn(): result = model.evaluate( val_dataset, @@ -250,13 +250,6 @@ print(f"before fine-tuning: {result}") ### 5.4 重训练 ```python -optimizer = paddle.optimizer.Momentum( - learning_rate=0.1, - parameters=net.parameters()) -model.prepare( - optimizer, - paddle.nn.CrossEntropyLoss(), - paddle.metric.Accuracy(topk=(1, 5))) model.fit(train_dataset, epochs=2, batch_size=128, verbose=1) result = model.evaluate(val_dataset,batch_size=128, log_freq=10) print(f"after fine-tuning: {result}") diff --git a/paddleslim/dygraph/prune/filter_pruner.py b/paddleslim/dygraph/prune/filter_pruner.py index 031d2a69..30143000 100644 --- a/paddleslim/dygraph/prune/filter_pruner.py +++ b/paddleslim/dygraph/prune/filter_pruner.py @@ -56,12 +56,11 @@ class FilterPruner(Pruner): """ - def __init__(self, model, inputs, sen_file=None): - super(FilterPruner, self).__init__(model, inputs) + def __init__(self, model, inputs, sen_file=None, opt=None): + super(FilterPruner, self).__init__(model, inputs, opt=opt) self._status = Status(sen_file) # sensitive and collections are just used in filter pruning self.collections = DygraphPruningCollections(model, inputs) - # skip vars in: # 1. 
depthwise conv2d layer self.skip_vars = [] @@ -216,7 +215,7 @@ class FilterPruner(Pruner): plan = self.prune_vars(ratios, axis=dims) c_flops = flops(self.model, self.inputs) c_pruned_flops = (base_flops - c_flops) / base_flops - plan.restore(self.model) + plan.restore(self.model, opt=self.opt) _logger.debug("Seaching ratios, pruned FLOPs: {}".format( c_pruned_flops)) key = str(round(c_pruned_flops, 4)) @@ -265,7 +264,7 @@ class FilterPruner(Pruner): var_name, ratio, loss)) sensitivities[var_name][ratio] = loss self._status.save(status_file) - plan.restore(model) + plan.restore(model, opt=self.opt) return sensitivities @@ -287,7 +286,7 @@ class FilterPruner(Pruner): def restore(self): if self.plan is not None: - self.plan.restore(self.model) + self.plan.restore(self.model, opt=self.opt) def cal_mask(self, pruned_ratio, collection): raise NotImplemented("cal_mask is not implemented") @@ -347,7 +346,7 @@ class FilterPruner(Pruner): if apply == "lazy": plan.apply(self.model, lazy=True) elif apply == "impretive": - plan.apply(self.model, lazy=False) + plan.apply(self.model, lazy=False, opt=self.opt) return plan def _transform_mask(self, mask, transform): diff --git a/paddleslim/dygraph/prune/fpgm_pruner.py b/paddleslim/dygraph/prune/fpgm_pruner.py index 60a37fd2..b230b093 100644 --- a/paddleslim/dygraph/prune/fpgm_pruner.py +++ b/paddleslim/dygraph/prune/fpgm_pruner.py @@ -12,8 +12,9 @@ _logger = get_logger(__name__, logging.INFO) class FPGMFilterPruner(FilterPruner): - def __init__(self, model, inputs, sen_file=None): - super(FPGMFilterPruner, self).__init__(model, inputs, sen_file=sen_file) + def __init__(self, model, inputs, sen_file=None, opt=None): + super(FPGMFilterPruner, self).__init__( + model, inputs, sen_file=sen_file, opt=opt) def cal_mask(self, pruned_ratio, collection): var_name = collection.master_name diff --git a/paddleslim/dygraph/prune/l1norm_pruner.py b/paddleslim/dygraph/prune/l1norm_pruner.py index 0d8f1283..014d8056 100644 --- 
a/paddleslim/dygraph/prune/l1norm_pruner.py +++ b/paddleslim/dygraph/prune/l1norm_pruner.py @@ -12,9 +12,9 @@ _logger = get_logger(__name__, logging.INFO) class L1NormFilterPruner(FilterPruner): - def __init__(self, model, inputs, sen_file=None): + def __init__(self, model, inputs, sen_file=None, opt=None): super(L1NormFilterPruner, self).__init__( - model, inputs, sen_file=sen_file) + model, inputs, sen_file=sen_file, opt=opt) def cal_mask(self, pruned_ratio, collection): var_name = collection.master_name diff --git a/paddleslim/dygraph/prune/l2norm_pruner.py b/paddleslim/dygraph/prune/l2norm_pruner.py index da527c05..64c1d6f6 100644 --- a/paddleslim/dygraph/prune/l2norm_pruner.py +++ b/paddleslim/dygraph/prune/l2norm_pruner.py @@ -12,9 +12,9 @@ _logger = get_logger(__name__, logging.INFO) class L2NormFilterPruner(FilterPruner): - def __init__(self, model, inputs, sen_file=None): + def __init__(self, model, inputs, sen_file=None, opt=None): super(L2NormFilterPruner, self).__init__( - model, inputs, sen_file=sen_file) + model, inputs, sen_file=sen_file, opt=opt) def cal_mask(self, pruned_ratio, collection): var_name = collection.master_name diff --git a/paddleslim/dygraph/prune/pruner.py b/paddleslim/dygraph/prune/pruner.py index 88597785..a864fe8c 100644 --- a/paddleslim/dygraph/prune/pruner.py +++ b/paddleslim/dygraph/prune/pruner.py @@ -16,16 +16,17 @@ class Pruner(object): Args: model(paddle.nn.Layer): The target model to be pruned. input_shape(list): The input shape of model. It is used to trace the graph of the model. - + opt(paddle.optimizer.Optimizer): The model's optimizer. Default: None. 
""" - def __init__(self, model, inputs): + def __init__(self, model, inputs, opt=None): self.model = model self.inputs = inputs self._var_shapes = {} for var in model.parameters(): self._var_shapes[var.name] = var.shape self.plan = None + self.opt = opt def status(self, data=None, eval_func=None, status_file=None): raise NotImplemented("status is not implemented") @@ -53,6 +54,6 @@ class Pruner(object): if apply == "lazy": global_plan.apply(self.model, lazy=True) elif apply == "impretive": - global_plan.apply(self.model, lazy=False) + global_plan.apply(self.model, lazy=False, opt=self.opt) self.plan = global_plan return global_plan diff --git a/paddleslim/dygraph/prune/pruning_plan.py b/paddleslim/dygraph/prune/pruning_plan.py index c6e91a65..cacdcbf4 100644 --- a/paddleslim/dygraph/prune/pruning_plan.py +++ b/paddleslim/dygraph/prune/pruning_plan.py @@ -95,11 +95,77 @@ class PruningPlan(): for name, mask in self._masks.items() ]) + details - def apply(self, model, lazy=False): + def _prune_opt(self, param_name, dims, bool_mask, opt): + if opt is None: + return + for k, v in opt._accumulators.items(): + var_tmp = v.get(param_name) + #NOTE: var_tmp.shape == [1] is used to skip variables like beta1_pow_acc in Adam optimizer. Its shape is [1] and there's no need to prune this one-value variable. 
+ if var_tmp is None or var_tmp.shape == [1]: + if var_tmp is not None: print(var_tmp.name, var_tmp.shape) + continue + t_value = var_tmp.value().get_tensor() + value = np.array(t_value).astype("float32") + + pruned_value = np.apply_along_axis(lambda data: data[bool_mask], + dims, value) + + p = t_value._place() + if p.is_cpu_place(): + place = paddle.CPUPlace() + elif p.is_cuda_pinned_place(): + place = paddle.CUDAPinnedPlace() + else: + p = core.Place() + p.set_place(t_value._place()) + place = paddle.CUDAPlace(p.gpu_device_id()) + + t_value.set(pruned_value, place) + + def _buffer_opt(self, param_name, sub_layer, opt): + if opt is None: + return + for k, v in opt._accumulators.items(): + var_tmp = v.get(param_name) + if var_tmp is None: continue + backup_name = var_tmp.name.replace(".", "_") + "_backup" + if backup_name not in sub_layer._buffers: + sub_layer.register_buffer( + backup_name, paddle.to_tensor(var_tmp.value().get_tensor())) + _logger.debug("Backup values of {} into buffers.".format( + var_tmp.name)) + + def _restore_opt(self, param_name, sub_layer, opt): + if opt is None: + return + for k, v in opt._accumulators.items(): + var_tmp = v.get(param_name) + if var_tmp is None: continue + backup_name = var_tmp.name.replace(".", "_") + "_backup" + if backup_name in sub_layer._buffers: + _logger.debug("Restore values of variable: {}".format( + var_tmp.name)) + t_value = var_tmp.value().get_tensor() + t_backup = sub_layer._buffers[backup_name].value().get_tensor() + + p = t_value._place() + if p.is_cpu_place(): + place = paddle.CPUPlace() + elif p.is_cuda_pinned_place(): + place = paddle.CUDAPinnedPlace() + else: + p = core.Place() + p.set_place(t_value._place()) + place = paddle.CUDAPlace(p.gpu_device_id()) + + t_value.set(np.array(t_backup).astype("float32"), place) + del sub_layer._buffers[backup_name] + + def apply(self, model, lazy=False, opt=None): if lazy: self.lazy_apply(model) else: - self.imperative_apply(model) + self.imperative_apply(model, opt) 
def lazy_apply(self, model): for name, sub_layer in model.named_sublayers(): @@ -136,12 +202,11 @@ class PruningPlan(): t_value.set(value * expand_mask, place) - def imperative_apply(self, model): + def imperative_apply(self, model, opt=None): """ Pruning values of variable imperatively. It is valid when pruning on one dimension. """ - for name, sub_layer in model.named_sublayers(): for param in sub_layer.parameters(include_sublayers=False): if param.name in self._masks: @@ -173,8 +238,13 @@ class PruningPlan(): paddle.to_tensor(value)) _logger.debug("Backup values of {} into buffers.". format(param.name)) + # save optimizer accumulators into layer buffer + self._buffer_opt(param.name, sub_layer, opt) + pruned_value = np.apply_along_axis( lambda data: data[bool_mask], dims, value) + self._prune_opt(param.name, dims, bool_mask, opt) + p = t_value._place() if p.is_cpu_place(): place = paddle.CPUPlace() @@ -184,16 +254,17 @@ class PruningPlan(): p = core.Place() p.set_place(t_value._place()) place = paddle.CUDAPlace(p.gpu_device_id()) - t_value.set(pruned_value, place) # for training if param.trainable: param.clear_gradient() - def restore(self, model): + def restore(self, model, opt=None): for name, sub_layer in model.named_sublayers(): for param in sub_layer.parameters(include_sublayers=False): + # restore optimizer accumulators from layer buffer + self._restore_opt(param.name, sub_layer, opt) backup_name = "_".join([param.name.replace(".", "_"), "backup"]) if backup_name in sub_layer._buffers: _logger.debug("Restore values of variable: {}".format( diff --git a/tests/dygraph/test_filter_pruner.py b/tests/dygraph/test_filter_pruner.py index f909e382..c6e5bc6f 100644 --- a/tests/dygraph/test_filter_pruner.py +++ b/tests/dygraph/test_filter_pruner.py @@ -72,11 +72,11 @@ class TestFilterPruner(unittest.TestCase): paddle.metric.Accuracy(topk=(1, 5))) model.fit(self.train_dataset, epochs=1, batch_size=128, verbose=1) pruners = [] - pruner = L1NormFilterPruner(net, [1, 1, 
28, 28]) + pruner = L1NormFilterPruner(net, [1, 1, 28, 28], opt=optimizer) pruners.append(pruner) - pruner = FPGMFilterPruner(net, [1, 1, 28, 28]) + pruner = FPGMFilterPruner(net, [1, 1, 28, 28], opt=optimizer) pruners.append(pruner) - pruner = L2NormFilterPruner(net, [1, 1, 28, 28]) + pruner = L2NormFilterPruner(net, [1, 1, 28, 28], opt=optimizer) pruners.append(pruner) def eval_fn(): @@ -90,6 +90,10 @@ class TestFilterPruner(unittest.TestCase): eval_func=eval_fn, sen_file=sen_file, target_vars=self._param_names) + model.fit(self.train_dataset, + epochs=1, + batch_size=128, + verbose=1) base_acc = eval_fn() plan = pruner.sensitive_prune(0.01) pruner.restore() @@ -165,7 +169,7 @@ class TestPruningGroupConv2d(unittest.TestCase): def add_cases(suite): # suite.addTest(TestStatus()) - # suite.addTest(TestFilterPruner(param_names=["conv2d_0.w_0"])) + suite.addTest(TestFilterPruner(param_names=["conv2d_0.w_0"])) suite.addTest(TestPruningGroupConv2d()) -- GitLab