diff --git a/demo/ofa/bert/README.md b/demo/ofa/bert/README.md
index 83054a4c01bf78c5ff089d469823bf1920bc0d31..b4f39186ed656f294132fd85923fbf633cb31c18 100644
--- a/demo/ofa/bert/README.md
+++ b/demo/ofa/bert/README.md
@@ -1,6 +1,7 @@
# Compressing the PaddleNLP BERT Model with OFA
-BERT-base is a general-purpose semantic representation model with strong transferability, but it still contains some redundant parameters. This tutorial shows how to use PaddleSlim to compress the BERT-base model from [PaddleNLP](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/).
+BERT-base is a general-purpose semantic representation model with strong transferability, but it still contains some redundant parameters. This tutorial shows how to use PaddleSlim to compress the BERT-base model from [PaddleNLP](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/).
+This tutorial only demonstrates how to quickly launch the corresponding training; for a detailed walkthrough, see [BERT](https://github.com/PaddlePaddle/PaddleSlim/blob/release/2.0.0/docs/zh_cn/nlp/paddlenlp_slim_ofa_tutorial.md)
## 1. Compression Results
@@ -82,7 +83,7 @@ BERT-base is a general-purpose semantic representation model with strong transferability, but
V100 |
16 |
- BERT |
+ BERT |
N
@@ -106,7 +107,7 @@ BERT-base is a general-purpose semantic representation model with strong transferability, but
| Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz |
16 |
- BERT |
+ BERT |
N
@@ -184,7 +185,7 @@ python -u ./run_glue_ofa.py --model_type bert \
After compression training, the results on the dev set are shown in the "Result with PaddleSlim" column of Table 1-1, and latency is shown in Table 1-2.
## 3. OFA API
-For an introduction to the OFA API, see [API](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/api_cn/ofa_api.rst)
+For an introduction to the OFA API, see [API](https://github.com/PaddlePaddle/PaddleSlim/blob/release/2.0.0/docs/zh_cn/api_cn/dygraph/ofa/ofa_api.rst)
# Compressing TinyBERT (L=4, D=312) with this code
The downstream-task models were converted from the official TinyBERT repo.
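
For readers following the API link above, the overall flow is convert-then-wrap: turn the trained model into an elastic supernet, then hand it to `OFA` for compression training. A minimal sketch assuming PaddleSlim 2.0 and PaddleNLP (the checkpoint name and expand ratios are illustrative, not taken from this repo):

```python
from paddlenlp.transformers import BertForSequenceClassification
from paddleslim.nas.ofa import OFA
from paddleslim.nas.ofa.convert_super import Convert, supernet

# Start from the fine-tuned task model (checkpoint name is illustrative).
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')

# Replace static layers with elastic "super" layers; expand_ratio lists
# the width choices explored during compression training.
sp_config = supernet(expand_ratio=[0.25, 0.5, 1.0])
model = Convert(sp_config).convert(model)

# ofa_model is then trained in place of the original model.
ofa_model = OFA(model)
```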
diff --git a/demo/ofa/ernie/README.md b/demo/ofa/ernie/README.md
index 6143bfc6b921fa11d307eb6ed1bd0be5d8bfd1cb..32212b30c373c09b778d3743d467df243e5f09f9 100644
--- a/demo/ofa/ernie/README.md
+++ b/demo/ofa/ernie/README.md
@@ -2,6 +2,8 @@
ERNIE is a knowledge-enhanced continual-learning framework for semantic understanding pioneered by Baidu. It combines large-scale pre-training data with rich multi-source knowledge and, through continual learning, keeps absorbing lexical, structural, and semantic knowledge from massive text corpora so that the model's quality keeps improving. This tutorial shows how to use PaddleSlim to compress the [ERNIE](https://github.com/PaddlePaddle/ERNIE) model.
+This tutorial only demonstrates how to quickly launch the corresponding training; for a detailed walkthrough, see [ERNIE](https://github.com/PaddlePaddle/PaddleSlim/blob/release/2.0.0/docs/zh_cn/nlp/ernie_slim_ofa_tutorial.md)
+
With the compression algorithm in this tutorial, the original Tiny-ERNIE model can be accelerated by 40% with no loss in accuracy.
## 1. Quick Start
@@ -41,4 +43,4 @@ python ./ofa_ernie.py \
- `depth_mult_list` specifies the range of choices for the number of Transformer blocks the model keeps during compression training (illustrated by the sketch after this file's diff).
## 2. OFA API
-TODO
+For an introduction to the OFA API, see [API](https://github.com/PaddlePaddle/PaddleSlim/blob/release/2.0.0/docs/zh_cn/api_cn/dygraph/ofa/ofa_api.rst)
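
To make `depth_mult_list` concrete, here is a plain-Python illustration (not the OFA API itself) of how each multiplier maps to the number of Transformer blocks a sub-model keeps, assuming a 12-layer supernet:

```python
# Hypothetical illustration: depth multipliers -> kept Transformer blocks.
num_hidden_layers = 12
depth_mult_list = [1.0, 0.75, 0.5]

for depth_mult in depth_mult_list:
    kept = max(1, round(num_hidden_layers * depth_mult))
    print(f"depth_mult={depth_mult}: keep {kept}/{num_hidden_layers} blocks")
```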
diff --git a/demo/ofa/ernie/ernie_supernet/importance.py b/demo/ofa/ernie/ernie_supernet/importance.py
index c27f89dd70cb681b30fb154925a4d60038b7241f..061f41b86df9be227745b97c7d0fe6558f76996e 100644
--- a/demo/ofa/ernie/ernie_supernet/importance.py
+++ b/demo/ofa/ernie/ernie_supernet/importance.py
@@ -52,12 +52,13 @@ def compute_neuron_head_importance(args, model, dev_ds, place, model_cfg):
for eval_task in eval_task_names:
for batch in dev_ds.start(place):
ids, sids, label = batch
- loss, _, _ = model(
+ out = model(
ids,
sids,
labels=label,
head_mask=head_mask,
num_layers=model_cfg['num_hidden_layers'])
+ loss = out[0]
loss.backward()
head_importance += L.abs(FD.to_variable(head_mask.gradient()))
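
The importance.py change replaces `loss, _, _ = model(...)` with indexing the returned tuple, so the call site no longer breaks if the model returns more (or fewer) auxiliary outputs. A minimal, library-free sketch of the pattern (`first_output` is a hypothetical helper, not part of the repo):

```python
# Index instead of unpack: robust to a varying number of model outputs.
def first_output(outputs):
    """Return the loss, assumed (as in the patch) to be outputs[0]."""
    return outputs[0] if isinstance(outputs, (tuple, list)) else outputs

loss = first_output((0.42, "logits", "attentions"))  # -> 0.42
```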
diff --git a/paddleslim/nas/ofa/get_sub_model.py b/paddleslim/nas/ofa/get_sub_model.py
index 1b93e694703e4a7c87775303397f7ce6c7f6b5e4..a7c4a6c816a0c2276d9e496b8f202fce43db1da1 100644
--- a/paddleslim/nas/ofa/get_sub_model.py
+++ b/paddleslim/nas/ofa/get_sub_model.py
@@ -14,6 +14,7 @@
import numpy as np
import paddle
+from paddle.fluid import core
__all__ = ['get_prune_params_config', 'prune_params']
@@ -96,11 +97,11 @@ def prune_params(model, param_config, super_model_sd=None):
p = t_value._place()
if p.is_cpu_place():
- place = paddle.CPUPlace()
+ place = core.CPUPlace()
elif p.is_cuda_pinned_place():
- place = paddle.CUDAPinnedPlace()
+ place = core.CUDAPinnedPlace()
else:
- place = paddle.CUDAPlace(p.gpu_device_id())
+ place = core.CUDAPlace(p.gpu_device_id())
t_value.set(prune_value, place)
if param.trainable:
param.clear_gradient()
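
The get_sub_model.py hunk swaps the top-level `paddle.*Place` constructors for their `paddle.fluid.core` counterparts, which are reliably exposed in the Paddle version this release targets. Isolating the branching as a hypothetical helper (not in the repo) makes the mapping explicit:

```python
from paddle.fluid import core

def resolve_place(p):
    """Rebuild a settable Place from an existing tensor's place `p`."""
    if p.is_cpu_place():
        return core.CPUPlace()
    elif p.is_cuda_pinned_place():
        return core.CUDAPinnedPlace()
    else:
        # Keep the tensor on the GPU it already occupies.
        return core.CUDAPlace(p.gpu_device_id())
```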
diff --git a/paddleslim/nas/ofa/layers_old.py b/paddleslim/nas/ofa/layers_old.py
index 25dcd1525b8439d07fdcb70601b560a41b7de2fb..9b4058c182eec7a229772926f2e633ed11b70b26 100644
--- a/paddleslim/nas/ofa/layers_old.py
+++ b/paddleslim/nas/ofa/layers_old.py
@@ -931,10 +931,10 @@ class SuperBatchNorm(fluid.dygraph.BatchNorm):
"use_mkldnn", False, "fuse_with_relu", self._fuse_with_relu,
"use_global_stats", self._use_global_stats,
'trainable_statistics', self._trainable_statistics)
- batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
+ batch_norm_out = core.ops.batch_norm(
input, weight, bias, mean, variance, mean_out, variance_out, *attrs)
return dygraph_utils._append_activation_in_dygraph(
- batch_norm_out, act=self._act)
+ batch_norm_out[0], act=self._act)
class SuperInstanceNorm(fluid.dygraph.InstanceNorm):
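
The layers_old.py hunk applies the same defensive pattern as importance.py: `core.ops.batch_norm` returns the normalized tensor bundled with saved mean/variance outputs, so the code indexes `[0]` instead of unpacking a fixed six-tuple. For contrast, a runnable sketch with the public dygraph API, which returns a single tensor (assumes Paddle 2.x):

```python
import numpy as np
import paddle

# Public API: paddle.nn.BatchNorm2D returns just the normalized tensor.
# The low-level core.ops.batch_norm used in layers_old.py instead returns
# a tuple whose first element is the tensor taken as batch_norm_out[0].
x = paddle.to_tensor(np.random.rand(2, 3, 8, 8).astype("float32"))
bn = paddle.nn.BatchNorm2D(3)
y = bn(x)
print(y.shape)  # [2, 3, 8, 8]
```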