Commit ad1a2fd1 authored by weishengyu

move slim into arch

Parent 7c6567cc
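For context on the hunks below: this commit relocates the slim (pruning / quantization) helpers from ppcls.engine.slim to ppcls.arch.slim, so callers now import them from the arch package. A minimal sketch of the new-style import, taken directly from the added lines in the diff:

```python
# After this commit the slim utilities live under ppcls.arch, not ppcls.engine.
from ppcls.arch.slim import prune_model, quantize_model
```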
......
@@ -26,7 +26,7 @@ from .utils import *
 from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
 from ppcls.utils import logger
 from ppcls.utils.save_load import load_dygraph_pretrain
-from ppcls.engine.slim import prune_model, quantize_model
+from ppcls.arch.slim import prune_model, quantize_model
 __all__ = ["build_model", "RecModel", "DistillationModel"]
......
......
@@ -12,5 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ppcls.engine.slim.prune import prune_model
-from ppcls.engine.slim.quant import quantize_model
+from ppcls.arch.slim.prune import prune_model
......
@@ -186,8 +186,6 @@ class Engine(object):
         # build model
         self.model = build_model(self.config)
-        self.quanted = self.config.get("Slim", {}).get("quant", False)
-        self.pruned = self.config.get("Slim", {}).get("prune", False)
         # set @to_static for benchmark, skip this by default.
         apply_to_static(self.config, self.model)
......
@@ -368,7 +366,7 @@ class Engine(object):
         model.eval()
         save_path = os.path.join(self.config["Global"]["save_inference_dir"],
                                  "inference")
-        if self.quanted:
+        if model.quanter:
             model.quanter.save_quantized_model(
                 model.base_model,
                 save_path,
......
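A rough sketch of the behavioral side of the last hunk, assuming `quantize_model` attaches a `quanter` attribute to the model when Slim quantization is enabled: the engine no longer caches a `quanted` flag from the config at build time; export inspects the model itself. The function name and the `input_spec` shape below are illustrative, not the verbatim PaddleClas code.

```python
import paddle

def export_inference_model(model, save_path):
    # Sketch of the export path after this commit (illustrative, not verbatim).
    model.eval()
    spec = [paddle.static.InputSpec(shape=[None, 3, 224, 224], dtype="float32")]
    if getattr(model, "quanter", None):
        # Quantized model: let the PaddleSlim quanter write the quantized inference model.
        model.quanter.save_quantized_model(model.base_model, save_path, input_spec=spec)
    else:
        # Plain model: standard dygraph-to-static export.
        static_model = paddle.jit.to_static(model, input_spec=spec)
        paddle.jit.save(static_model, save_path)
```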