diff --git a/demo/dygraph/unstructured_pruning/train.py b/demo/dygraph/unstructured_pruning/train.py index 0b9cc143a1583a4096acad934fac5ebd7721e7e5..a4b237964ef0dd672238744faeca1a24a094fb7b 100644 --- a/demo/dygraph/unstructured_pruning/train.py +++ b/demo/dygraph/unstructured_pruning/train.py @@ -22,6 +22,7 @@ _logger = get_logger(__name__, level=logging.INFO) parser = argparse.ArgumentParser(description=__doc__) add_arg = functools.partial(add_arguments, argparser=parser) # yapf: disable +add_arg('use_gpu', bool, True, "Whether to use GPU for training or not. Default: True") add_arg('batch_size', int, 64, "Minibatch size. Default: 64") add_arg('batch_size_for_validation', int, 64, "Minibatch size for validation. Default: 64") add_arg('lr', float, 0.05, "The learning rate used to fine-tune pruned model. Default: 0.05") @@ -105,7 +106,10 @@ def create_unstructured_pruner(model, args, configs=None): def compress(args): - place = paddle.set_device('gpu') + if args.use_gpu: + place = paddle.set_device('gpu') + else: + place = paddle.set_device('cpu') trainer_num = paddle.distributed.get_world_size() use_data_parallel = trainer_num != 1 diff --git a/demo/dygraph/unstructured_pruning/train_imagenet_cpu_base.sh b/demo/dygraph/unstructured_pruning/train_imagenet_cpu_base.sh new file mode 100644 index 0000000000000000000000000000000000000000..6a26b8eddeadd7178b1921d8241900216b08fbe9 --- /dev/null +++ b/demo/dygraph/unstructured_pruning/train_imagenet_cpu_base.sh @@ -0,0 +1,9 @@ +#!/bin/bash +CUDA_VISIBLE_DEVICES='' python \ + train.py \ + --batch_size 64 \ + --data imagenet \ + --pruning_mode ratio \ + --ratio 0.55 \ + --lr 0.05 \ + --use_gpu False diff --git a/demo/unstructured_prune/train.py b/demo/unstructured_prune/train.py index 31efbf294dc92ab05651ce068d2be4892405b293..1c0362def8df981a8d1052846616c52607801149 100644 --- a/demo/unstructured_prune/train.py +++ b/demo/unstructured_prune/train.py @@ -21,6 +21,7 @@ _logger = get_logger(__name__, level=logging.INFO) parser 
= argparse.ArgumentParser(description=__doc__) add_arg = functools.partial(add_arguments, argparser=parser) # yapf: disable +add_arg('use_gpu', bool, True, "Whether to use GPU for training or not. Default: True") add_arg('batch_size', int, 64, "Minibatch size. Default: 64") add_arg('batch_size_for_validation', int, 64, "Minibatch size for validation. Default: 64") add_arg('model', str, "MobileNet", "The target model.") @@ -137,7 +138,10 @@ def compress(args): image_shape = [int(m) for m in image_shape.split(",")] assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list) - places = paddle.static.cuda_places() + if args.use_gpu: + places = paddle.static.cuda_places() + else: + places = paddle.static.cpu_places() place = places[0] exe = paddle.static.Executor(place) diff --git a/demo/unstructured_prune/train_imagenet_cpu_base.sh b/demo/unstructured_prune/train_imagenet_cpu_base.sh new file mode 100644 index 0000000000000000000000000000000000000000..5cfe461bdfaa43f025fd3d0a13bc0151a2f5a60b --- /dev/null +++ b/demo/unstructured_prune/train_imagenet_cpu_base.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES='' python train.py \ + --batch_size 64 \ + --data imagenet \ + --pruning_mode ratio \ + --ratio 0.55 \ + --lr 0.05 \ + --model MobileNet \ + --pretrained_model "MobileNetV1_pretrained" \ + --use_gpu False