diff --git a/demo/auto_prune/train.py b/demo/auto_prune/train.py
index d65dd875a8d650b57bbe429514e99dc6fa46e630..ff55a54597ce7e923764f9e6f8dff232d2d28658 100644
--- a/demo/auto_prune/train.py
+++ b/demo/auto_prune/train.py
@@ -116,8 +116,8 @@ def compress(args):
 
         fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
 
-    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
-    train_reader = paddle.batch(
+    val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
+    train_reader = paddle.fluid.io.batch(
         train_reader, batch_size=args.batch_size, drop_last=True)
 
     train_feeder = feeder = fluid.DataFeeder([image, label], place)
diff --git a/demo/auto_prune/train_finetune.py b/demo/auto_prune/train_finetune.py
index 6508f435bb52cfc0421c77bc0c893bff7f315d03..0dcb56bfe7988475cc7c63ebde257897456bfb22 100644
--- a/demo/auto_prune/train_finetune.py
+++ b/demo/auto_prune/train_finetune.py
@@ -34,12 +34,12 @@ add_arg('config_file', str, None, "The config file for comp
 model_list = [m for m in dir(models) if "__" not in m]
 
 ratiolist = [
-# [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
-# [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
-    ]
+    # [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
+    # [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
+]
 
-def save_model(args, exe, train_prog, eval_prog,info):
+def save_model(args, exe, train_prog, eval_prog, info):
     model_path = os.path.join(args.model_save_dir, args.model, str(info))
     if not os.path.isdir(model_path):
         os.makedirs(model_path)
@@ -58,29 +58,31 @@ def piecewise_decay(args):
         regularization=fluid.regularizer.L2Decay(args.l2_decay))
     return optimizer
 
+
 def cosine_decay(args):
     step = int(math.ceil(float(args.total_images) / args.batch_size))
     learning_rate = fluid.layers.cosine_decay(
-        learning_rate=args.lr,
-        step_each_epoch=step,
-        epochs=args.num_epochs)
+        learning_rate=args.lr, step_each_epoch=step, epochs=args.num_epochs)
     optimizer = fluid.optimizer.Momentum(
         learning_rate=learning_rate,
         momentum=args.momentum_rate,
         regularization=fluid.regularizer.L2Decay(args.l2_decay))
     return optimizer
 
+
 def create_optimizer(args):
     if args.lr_strategy == "piecewise_decay":
         return piecewise_decay(args)
     elif args.lr_strategy == "cosine_decay":
         return cosine_decay(args)
 
+
 def compress(args):
-    class_dim=1000
-    image_shape="3,224,224"
+    class_dim = 1000
+    image_shape = "3,224,224"
     image_shape = [int(m) for m in image_shape.split(",")]
-    assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list)
+    assert args.model in model_list, "{} is not in lists: {}".format(
+        args.model, model_list)
     image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
     # model definition
@@ -98,18 +100,22 @@ def compress(args):
     exe.run(fluid.default_startup_program())
 
     if args.pretrained_model:
+
         def if_exist(var):
-            exist = os.path.exists(os.path.join(args.pretrained_model, var.name))
-            print("exist",exist)
+            exist = os.path.exists(
+                os.path.join(args.pretrained_model, var.name))
+            print("exist", exist)
             return exist
+
         #fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
 
-    val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
-    train_reader = paddle.batch(
+    val_reader = paddle.fluid.io.batch(reader.val(), batch_size=args.batch_size)
+    train_reader = paddle.fluid.io.batch(
         reader.train(), batch_size=args.batch_size, drop_last=True)
 
     train_feeder = feeder = fluid.DataFeeder([image, label], place)
-    val_feeder = feeder = fluid.DataFeeder([image, label], place, program=val_program)
+    val_feeder = feeder = fluid.DataFeeder(
+        [image, label], place, program=val_program)
 
     def test(epoch, program):
         batch_id = 0
@@ -117,80 +123,99 @@ def compress(args):
         acc_top5_ns = []
         for data in val_reader():
             start_time = time.time()
-            acc_top1_n, acc_top5_n = exe.run(program,
-                                             feed=train_feeder.feed(data),
-                                             fetch_list=[acc_top1.name, acc_top5.name])
+            acc_top1_n, acc_top5_n = exe.run(
+                program,
+                feed=train_feeder.feed(data),
+                fetch_list=[acc_top1.name, acc_top5.name])
             end_time = time.time()
-            print("Eval epoch[{}] batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".format(epoch, batch_id, np.mean(acc_top1_n), np.mean(acc_top5_n), end_time-start_time))
+            print(
+                "Eval epoch[{}] batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".
+                format(epoch, batch_id,
+                       np.mean(acc_top1_n),
+                       np.mean(acc_top5_n), end_time - start_time))
             acc_top1_ns.append(np.mean(acc_top1_n))
             acc_top5_ns.append(np.mean(acc_top5_n))
             batch_id += 1
-        print("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(epoch, np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
-
+        print("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(
+            epoch,
+            np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
 
     def train(epoch, program):
         build_strategy = fluid.BuildStrategy()
         exec_strategy = fluid.ExecutionStrategy()
         train_program = fluid.compiler.CompiledProgram(
-            program).with_data_parallel(
-            loss_name=avg_cost.name,
-            build_strategy=build_strategy,
-            exec_strategy=exec_strategy)
+            program).with_data_parallel(
+                loss_name=avg_cost.name,
+                build_strategy=build_strategy,
+                exec_strategy=exec_strategy)
         batch_id = 0
         for data in train_reader():
             start_time = time.time()
-            loss_n, acc_top1_n, acc_top5_n,lr_n = exe.run(train_program,
-                                                          feed=train_feeder.feed(data),
-                                                          fetch_list=[avg_cost.name, acc_top1.name, acc_top5.name,"learning_rate"])
+            loss_n, acc_top1_n, acc_top5_n, lr_n = exe.run(
+                train_program,
+                feed=train_feeder.feed(data),
+                fetch_list=[
+                    avg_cost.name, acc_top1.name, acc_top5.name,
+                    "learning_rate"
+                ])
             end_time = time.time()
             loss_n = np.mean(loss_n)
             acc_top1_n = np.mean(acc_top1_n)
             acc_top5_n = np.mean(acc_top5_n)
             lr_n = np.mean(lr_n)
-            print("epoch[{}]-batch[{}] - loss: {}; acc_top1: {}; acc_top5: {};lrn: {}; time: {}".format(epoch, batch_id, loss_n, acc_top1_n, acc_top5_n, lr_n,end_time-start_time))
+            print(
+                "epoch[{}]-batch[{}] - loss: {}; acc_top1: {}; acc_top5: {};lrn: {}; time: {}".
+                format(epoch, batch_id, loss_n, acc_top1_n, acc_top5_n, lr_n,
+                       end_time - start_time))
             batch_id += 1
 
     params = []
     for param in fluid.default_main_program().global_block().all_parameters():
         #if "_weights" in param.name and "conv1_weights" not in param.name:
-        if "_sep_weights" in param.name: 
+        if "_sep_weights" in param.name:
             params.append(param.name)
-    print("fops before pruning: {}".format(flops(fluid.default_main_program())))
+    print("fops before pruning: {}".format(
+        flops(fluid.default_main_program())))
     pruned_program_iter = fluid.default_main_program()
     pruned_val_program_iter = val_program
     for ratios in ratiolist:
         pruner = Pruner()
-        pruned_val_program_iter = pruner.prune(pruned_val_program_iter,
-                                               fluid.global_scope(),
-                                               params=params,
-                                               ratios=ratios,
-                                               place=place,
-                                               only_graph=True)
-
-
-        pruned_program_iter = pruner.prune(pruned_program_iter,
-                                           fluid.global_scope(),
-                                           params=params,
-                                           ratios=ratios,
-                                           place=place)
+        pruned_val_program_iter = pruner.prune(
+            pruned_val_program_iter,
+            fluid.global_scope(),
+            params=params,
+            ratios=ratios,
+            place=place,
+            only_graph=True)
+
+        pruned_program_iter = pruner.prune(
+            pruned_program_iter,
+            fluid.global_scope(),
+            params=params,
+            ratios=ratios,
+            place=place)
         print("fops after pruning: {}".format(flops(pruned_program_iter)))
-    """ do not inherit learning rate """
-    if(os.path.exists(args.pretrained_model + "/learning_rate")):
-        os.remove( args.pretrained_model + "/learning_rate")
-    if(os.path.exists(args.pretrained_model + "/@LR_DECAY_COUNTER@")):
-        os.remove( args.pretrained_model + "/@LR_DECAY_COUNTER@")
-    fluid.io.load_vars(exe, args.pretrained_model , main_program = pruned_program_iter, predicate=if_exist)
+    if (os.path.exists(args.pretrained_model + "/learning_rate")):
+        os.remove(args.pretrained_model + "/learning_rate")
+    if (os.path.exists(args.pretrained_model + "/@LR_DECAY_COUNTER@")):
+        os.remove(args.pretrained_model + "/@LR_DECAY_COUNTER@")
+    fluid.io.load_vars(
+        exe,
+        args.pretrained_model,
+        main_program=pruned_program_iter,
+        predicate=if_exist)
     pruned_program = pruned_program_iter
     pruned_val_program = pruned_val_program_iter
     for i in range(args.num_epochs):
         train(i, pruned_program)
         test(i, pruned_val_program)
-        save_model(args,exe,pruned_program,pruned_val_program,i)
+        save_model(args, exe, pruned_program, pruned_val_program, i)
+
 
 def main():
     args = parser.parse_args()
diff --git a/demo/auto_prune/train_iterator.py b/demo/auto_prune/train_iterator.py
index b30531ea9b7e7247fe44db958f1dd08759580cfb..8b7ca07f768e661476a5943dbc623291a37f939a 100644
--- a/demo/auto_prune/train_iterator.py
+++ b/demo/auto_prune/train_iterator.py
@@ -41,9 +41,10 @@ add_arg('test_period', int, 10, "Test period in epoches.")
 model_list = [m for m in dir(models) if "__" not in m]
 
 ratiolist = [
-# [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
-# [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
-    ]
+    # [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
+    # [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
+]
+
 
 def piecewise_decay(args):
     step = int(math.ceil(float(args.total_images) / args.batch_size))
@@ -121,8 +122,8 @@ def compress(args):
 
     # fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
 
-    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
-    train_reader = paddle.batch(
+    val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
+    train_reader = paddle.fluid.io.batch(
         train_reader, batch_size=args.batch_size, drop_last=True)
 
     train_feeder = feeder = fluid.DataFeeder([image, label], place)
@@ -194,21 +195,26 @@ def compress(args):
 
     for ratios in ratiolist:
         pruner = Pruner()
-        pruned_val_program_iter = pruner.prune(pruned_val_program_iter,
-                                               fluid.global_scope(),
-                                               params=params,
-                                               ratios=ratios,
-                                               place=place,
-                                               only_graph=True)
-
-
-        pruned_program_iter = pruner.prune(pruned_program_iter,
-                                           fluid.global_scope(),
-                                           params=params,
-                                           ratios=ratios,
-                                           place=place)
+        pruned_val_program_iter = pruner.prune(
+            pruned_val_program_iter,
+            fluid.global_scope(),
+            params=params,
+            ratios=ratios,
+            place=place,
+            only_graph=True)
+
+        pruned_program_iter = pruner.prune(
+            pruned_program_iter,
+            fluid.global_scope(),
+            params=params,
+            ratios=ratios,
+            place=place)
         print("fops after pruning: {}".format(flops(pruned_program_iter)))
-        fluid.io.load_vars(exe, args.pretrained_model , main_program = pruned_program_iter, predicate=if_exist)
+        fluid.io.load_vars(
+            exe,
+            args.pretrained_model,
+            main_program=pruned_program_iter,
+            predicate=if_exist)
 
     pruner = AutoPruner(
         pruned_val_program_iter,
@@ -238,8 +244,6 @@ def compress(args):
 
             pruner.reward(score)
 
-
-
 def main():
     args = parser.parse_args()
     print_arguments(args)
diff --git a/demo/distillation/distill.py b/demo/distillation/distill.py
index 3bafa159ea95690198f34e62707caf53f64d0bf6..f1b2be691ff34f4765568de342d008e7d2657ff6 100644
--- a/demo/distillation/distill.py
+++ b/demo/distillation/distill.py
@@ -133,9 +133,9 @@ def compress(args):
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
 
-    train_reader = paddle.batch(
+    train_reader = paddle.fluid.io.batch(
         train_reader, batch_size=args.batch_size, drop_last=True)
-    val_reader = paddle.batch(
+    val_reader = paddle.fluid.io.batch(
         val_reader, batch_size=args.batch_size, drop_last=True)
     val_program = student_program.clone(for_test=True)
diff --git a/demo/distillation/image_classification_distillation_tutorial.ipynb b/demo/distillation/image_classification_distillation_tutorial.ipynb
index 28c1de23050fd1944401c095304dba4674aa965b..e1d679cdabaafc3eb71a5382dff1da0946b0b799 100644
--- a/demo/distillation/image_classification_distillation_tutorial.ipynb
+++ b/demo/distillation/image_classification_distillation_tutorial.ipynb
@@ -165,7 +165,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "train_reader = paddle.batch(\n",
+    "train_reader = paddle.fluid.io.batch(\n",
     "    paddle.dataset.mnist.train(), batch_size=128, drop_last=True)\n",
     "train_feeder = fluid.DataFeeder(['image', 'label'], fluid.CPUPlace(), student_program)"
    ]
diff --git a/demo/nas/block_sa_nas_mobilenetv2.py b/demo/nas/block_sa_nas_mobilenetv2.py
index e4572583736e650235a1832faed82f01ba72b754..a32f97b0b8bb5b7bbe20fa0842e94ad58ff9751d 100644
--- a/demo/nas/block_sa_nas_mobilenetv2.py
+++ b/demo/nas/block_sa_nas_mobilenetv2.py
@@ -137,22 +137,22 @@ def search_mobilenetv2_block(config, args, image_size):
     exe.run(startup_program)
 
     if args.data == 'cifar10':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             paddle.reader.shuffle(
                 paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             paddle.dataset.cifar.test10(cycle=False),
             batch_size=args.batch_size,
             drop_last=False)
     elif args.data == 'imagenet':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             imagenet_reader.train(),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             imagenet_reader.val(),
             batch_size=args.batch_size,
             drop_last=False)
diff --git a/demo/nas/image_classification_nas_quick_start.ipynb b/demo/nas/image_classification_nas_quick_start.ipynb
index 78d07117b6fd37111d8697195b010da0d485bf69..72f04cf848b8caec65b65b177c99d15ebfa05cc6 100644
--- a/demo/nas/image_classification_nas_quick_start.ipynb
+++ b/demo/nas/image_classification_nas_quick_start.ipynb
@@ -114,9 +114,9 @@
     "    if current_flops > 321208544:\n",
     "        continue\n",
     "    \n",
-    "    train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)\n",
+    "    train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)\n",
     "    train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())\n",
-    "    test_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False),\n",
+    "    test_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False),\n",
     "                               batch_size=256)\n",
     "    test_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())\n",
     "\n",
@@ -160,4 +160,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
\ No newline at end of file
+}
diff --git a/demo/nas/parl_nas_mobilenetv2.py b/demo/nas/parl_nas_mobilenetv2.py
index 1d698742cde532c8c3b2b1fb03183a783e2b15da..ef9195fb2dbb8ddf34ce574e4e6c823d1e0b790e 100644
--- a/demo/nas/parl_nas_mobilenetv2.py
+++ b/demo/nas/parl_nas_mobilenetv2.py
@@ -105,22 +105,22 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
     exe.run(startup_program)
 
     if args.data == 'cifar10':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             paddle.reader.shuffle(
                 paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             paddle.dataset.cifar.test10(cycle=False),
             batch_size=args.batch_size,
             drop_last=False)
     elif args.data == 'imagenet':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             imagenet_reader.train(),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             imagenet_reader.val(),
             batch_size=args.batch_size,
             drop_last=False)
diff --git a/demo/nas/rl_nas_mobilenetv2.py b/demo/nas/rl_nas_mobilenetv2.py
index 3eb562334120b5baaa118a6baa1718767eb91015..4997a530ea1549f4afb4fb7d7fa51ab7115f2b65 100644
--- a/demo/nas/rl_nas_mobilenetv2.py
+++ b/demo/nas/rl_nas_mobilenetv2.py
@@ -109,22 +109,22 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
     exe.run(startup_program)
 
     if args.data == 'cifar10':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             paddle.reader.shuffle(
                 paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             paddle.dataset.cifar.test10(cycle=False),
             batch_size=args.batch_size,
             drop_last=False)
     elif args.data == 'imagenet':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             imagenet_reader.train(),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             imagenet_reader.val(),
             batch_size=args.batch_size,
             drop_last=False)
diff --git a/demo/nas/sa_nas_mobilenetv2.py b/demo/nas/sa_nas_mobilenetv2.py
index 9ef26bc5d1d75a10cd8ebf7183bb70b8dbd2b354..da6f17548a0881aa325524c513c048324641210d 100644
--- a/demo/nas/sa_nas_mobilenetv2.py
+++ b/demo/nas/sa_nas_mobilenetv2.py
@@ -102,22 +102,22 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
     exe.run(startup_program)
 
     if args.data == 'cifar10':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             paddle.reader.shuffle(
                 paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             paddle.dataset.cifar.test10(cycle=False),
             batch_size=args.batch_size,
             drop_last=False)
     elif args.data == 'imagenet':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             imagenet_reader.train(),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             imagenet_reader.val(),
             batch_size=args.batch_size,
             drop_last=False)
@@ -197,22 +197,22 @@ def test_search_result(tokens, image_size, args, config):
     exe.run(startup_program)
 
     if args.data == 'cifar10':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             paddle.reader.shuffle(
                 paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             paddle.dataset.cifar.test10(cycle=False),
             batch_size=args.batch_size,
             drop_last=False)
     elif args.data == 'imagenet':
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             imagenet_reader.train(),
             batch_size=args.batch_size,
             drop_last=True)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             imagenet_reader.val(),
             batch_size=args.batch_size,
             drop_last=False)
     train_loader.set_sample_list_generator(
diff --git a/demo/one_shot/train.py b/demo/one_shot/train.py
index e9904e45143f879bd6d1019dc6402898780173b1..5e8267ff66f37f5b24807eb86c3bdad7182de2b7 100644
--- a/demo/one_shot/train.py
+++ b/demo/one_shot/train.py
@@ -113,7 +113,7 @@ def test_mnist(model, tokens=None):
     acc_set = []
     avg_loss_set = []
     batch_size = 64
-    test_reader = paddle.batch(
+    test_reader = paddle.fluid.io.batch(
         paddle.dataset.mnist.test(), batch_size=batch_size, drop_last=True)
     for batch_id, data in enumerate(test_reader()):
         dy_x_data = np.array([x[0].reshape(1, 28, 28)
@@ -145,7 +145,7 @@ def train_mnist(args, model, tokens=None):
     adam = AdamOptimizer(
         learning_rate=0.001, parameter_list=model.parameters())
 
-    train_reader = paddle.batch(
+    train_reader = paddle.fluid.io.batch(
         paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True)
     if args.use_data_parallel:
         train_reader = fluid.contrib.reader.distributed_batch_reader(
diff --git a/demo/prune/eval.py b/demo/prune/eval.py
index 0cd2a9dc6049233104c412a532335e5dd6bf5c7f..b2c9ea26cbb2223c9cb0ee4bac0962680637f2db 100644
--- a/demo/prune/eval.py
+++ b/demo/prune/eval.py
@@ -63,7 +63,7 @@ def eval(args):
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
 
-    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
+    val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
     val_feeder = feeder = fluid.DataFeeder(
         [image, label], place, program=val_program)
diff --git a/demo/prune/image_classification_pruning_quick_start.ipynb b/demo/prune/image_classification_pruning_quick_start.ipynb
index 849fd8a67fa8478b28c9cea53017fddeec826b0f..670a2edf611629a9f33b694b59b24138b0bfdc56 100644
--- a/demo/prune/image_classification_pruning_quick_start.ipynb
+++ b/demo/prune/image_classification_pruning_quick_start.ipynb
@@ -161,7 +161,7 @@
    "outputs": [],
    "source": [
     "import paddle.dataset.mnist as reader\n",
-    "train_reader = paddle.batch(\n",
+    "train_reader = paddle.fluid.io.batch(\n",
     "    reader.train(), batch_size=128, drop_last=True)\n",
     "train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())"
    ]
diff --git a/demo/prune/train.py b/demo/prune/train.py
index 231cda9cc183ede5f83bf4f51033d8d321fd1e1a..73e08756f34bd212db3b2239351cf9f8ce6962d9 100644
--- a/demo/prune/train.py
+++ b/demo/prune/train.py
@@ -142,8 +142,8 @@ def compress(args):
                 args.pretrained_model))
         fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
 
-    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
-    train_reader = paddle.batch(
+    val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
+    train_reader = paddle.fluid.io.batch(
         train_reader, batch_size=args.batch_size, drop_last=True)
 
     train_feeder = feeder = fluid.DataFeeder([image, label], place)
diff --git a/demo/quant/quant_aware/image_classification_training_aware_quantization_quick_start.ipynb b/demo/quant/quant_aware/image_classification_training_aware_quantization_quick_start.ipynb
index 495e3e1655074cffbb5a390c335ed47399d0968b..0c771cc096b26d79cc7d0b0b2aa87ff29bbce27f 100755
--- a/demo/quant/quant_aware/image_classification_training_aware_quantization_quick_start.ipynb
+++ b/demo/quant/quant_aware/image_classification_training_aware_quantization_quick_start.ipynb
@@ -81,9 +81,9 @@
    "outputs": [],
    "source": [
     "import paddle.dataset.mnist as reader\n",
-    "train_reader = paddle.batch(\n",
+    "train_reader = paddle.fluid.io.batch(\n",
     "    reader.train(), batch_size=128, drop_last=True)\n",
-    "test_reader = paddle.batch(\n",
+    "test_reader = paddle.fluid.io.batch(\n",
     "    reader.train(), batch_size=128, drop_last=True)\n",
     "train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())"
    ]
diff --git a/demo/quant/quant_aware/train.py b/demo/quant/quant_aware/train.py
index 81016e2931a3bf66455e0528b99a573885549215..f8040a5917aa054fe248f69d83c1bc65fed64ed3 100644
--- a/demo/quant/quant_aware/train.py
+++ b/demo/quant/quant_aware/train.py
@@ -159,8 +159,8 @@ def compress(args):
 
         fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
 
-    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
-    train_reader = paddle.batch(
+    val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
+    train_reader = paddle.fluid.io.batch(
         train_reader, batch_size=args.batch_size, drop_last=True)
 
     train_feeder = feeder = fluid.DataFeeder([image, label], place)
diff --git a/demo/quant/quant_post/eval.py b/demo/quant/quant_post/eval.py
index d7414b0973226425e9627137c185a39f26603fb9..c144fd1690ed55304cc722472cb155814bd26f64 100755
--- a/demo/quant/quant_post/eval.py
+++ b/demo/quant/quant_post/eval.py
@@ -46,7 +46,7 @@ def eval(args):
         exe,
         model_filename=args.model_name,
         params_filename=args.params_name)
-    val_reader = paddle.batch(reader.val(), batch_size=128)
+    val_reader = paddle.fluid.io.batch(reader.val(), batch_size=128)
     feeder = fluid.DataFeeder(
         place=place, feed_list=feed_target_names, program=val_program)
diff --git a/demo/quant/quant_post/image_classification_post_training_quantization_quick_start.ipynb b/demo/quant/quant_post/image_classification_post_training_quantization_quick_start.ipynb
index 23b813e6c2525341579d26c13908a89ed96c1017..bdc0bc32e7c389bebd765c9f3726e4acb84f2e0e 100755
--- a/demo/quant/quant_post/image_classification_post_training_quantization_quick_start.ipynb
+++ b/demo/quant/quant_post/image_classification_post_training_quantization_quick_start.ipynb
@@ -79,9 +79,9 @@
    "outputs": [],
"source": [ "import paddle.dataset.mnist as reader\n", - "train_reader = paddle.batch(\n", + "train_reader = paddle.fluid.io.batch(\n", " reader.train(), batch_size=128, drop_last=True)\n", - "test_reader = paddle.batch(\n", + "test_reader = paddle.fluid.io.batch(\n", " reader.train(), batch_size=128, drop_last=True)\n", "train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())" ] diff --git a/demo/sensitive/image_classification_sensitivity_analysis.ipynb b/demo/sensitive/image_classification_sensitivity_analysis.ipynb index 002bc8ee07b40069fef9f09fd7940d2a45e94ebb..19234337f744ac01aceaaf92b7443b5e97e40923 100644 --- a/demo/sensitive/image_classification_sensitivity_analysis.ipynb +++ b/demo/sensitive/image_classification_sensitivity_analysis.ipynb @@ -73,9 +73,9 @@ "outputs": [], "source": [ "import paddle.dataset.mnist as reader\n", - "train_reader = paddle.batch(\n", + "train_reader = paddle.fluid.io.batch(\n", " reader.train(), batch_size=128, drop_last=True)\n", - "test_reader = paddle.batch(\n", + "test_reader = paddle.fluid.io.batch(\n", " reader.test(), batch_size=128, drop_last=True)\n", "data_feeder = fluid.DataFeeder(inputs, place)" ] diff --git a/demo/sensitive/train.py b/demo/sensitive/train.py index 7137b95368aba9b2b46b734b0151a1fa18ad0dc7..693d6bf4e167d68b04c255485a3b64282cbebdc1 100644 --- a/demo/sensitive/train.py +++ b/demo/sensitive/train.py @@ -68,7 +68,7 @@ def compress(args): fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist) - val_reader = paddle.batch(val_reader, batch_size=args.batch_size) + val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size) val_feeder = feeder = fluid.DataFeeder( [image, label], place, program=val_program) diff --git a/demo/sensitive_prune/greedy_prune.py b/demo/sensitive_prune/greedy_prune.py index e3f8bb57eb3eb0e5c515376970d9484eeca78764..f59b7eaa6e5b02fb977be730ab6f72a2a2518fb8 100644 --- a/demo/sensitive_prune/greedy_prune.py +++ b/demo/sensitive_prune/greedy_prune.py @@ -119,8 +119,8 @@ def compress(args): fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist) - val_reader = paddle.batch(val_reader, batch_size=args.batch_size) - train_reader = paddle.batch( + val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size) + train_reader = paddle.fluid.io.batch( train_reader, batch_size=args.batch_size, drop_last=True) train_feeder = feeder = fluid.DataFeeder([image, label], place) diff --git a/demo/sensitive_prune/prune.py b/demo/sensitive_prune/prune.py index e6c1ba7ccd09f41c8d0652075036a1c279251517..a4cb8ac242558c6daa6b92c51bcea534b406098f 100644 --- a/demo/sensitive_prune/prune.py +++ b/demo/sensitive_prune/prune.py @@ -117,8 +117,8 @@ def compress(args): fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist) - val_reader = paddle.batch(val_reader, batch_size=args.batch_size) - train_reader = paddle.batch( + val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size) + train_reader = paddle.fluid.io.batch( train_reader, batch_size=args.batch_size, drop_last=True) train_feeder = feeder = fluid.DataFeeder([image, label], place) diff --git a/demo/slimfacenet/dataloader/casia.py b/demo/slimfacenet/dataloader/casia.py index 7faec56cfd94ec1b234d928d8698e4b118ef0a11..4162ef46b5aebdaa6cb6bb4b70a817cb5948df15 100644 --- a/demo/slimfacenet/dataloader/casia.py +++ b/demo/slimfacenet/dataloader/casia.py @@ -79,7 +79,7 @@ if __name__ == '__main__': dataset = CASIA_Face(root=data_dir) print(len(dataset)) print(dataset.class_nums) - trainloader = paddle.batch( + 
+    trainloader = paddle.fluid.io.batch(
         dataset.reader, batch_size=1, drop_last=False)
     for i in range(10):
         for data in trainloader():
diff --git a/demo/slimfacenet/lfw_eval.py b/demo/slimfacenet/lfw_eval.py
index 44bf0d33bcaa59096cb64d90e59fdc150a5e5160..974fe0783ca4a7f040153514feb7c10851865bf1 100644
--- a/demo/slimfacenet/lfw_eval.py
+++ b/demo/slimfacenet/lfw_eval.py
@@ -159,7 +159,7 @@ if __name__ == "__main__":
     train_dataset = CASIA_Face(root=args.train_data_dir)
     nl, nr, flods, flags = parse_filelist(args.test_data_dir)
     test_dataset = LFW(nl, nr)
-    test_reader = paddle.batch(
+    test_reader = paddle.fluid.io.batch(
         test_dataset.reader,
         batch_size=args.test_batchsize,
         drop_last=False)
diff --git a/demo/slimfacenet/train_eval.py b/demo/slimfacenet/train_eval.py
index 421496f0463ce99a48b19f483f970f44ceb022dd..901384a7bd2123684db1373b6a2712c7c67b2aa7 100644
--- a/demo/slimfacenet/train_eval.py
+++ b/demo/slimfacenet/train_eval.py
@@ -166,7 +166,7 @@ def build_program(program, startup, args, is_train=True):
             image = fluid.data(
                 name='image', shape=[-1, 3, 112, 96], dtype='float32')
             label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
-            train_reader = paddle.batch(
+            train_reader = paddle.fluid.io.batch(
                 train_dataset.reader,
                 batch_size=args.train_batchsize // num_trainers,
                 drop_last=False)
@@ -187,7 +187,7 @@ def build_program(program, startup, args, is_train=True):
         else:
             nl, nr, flods, flags = parse_filelist(args.test_data_dir)
             test_dataset = LFW(nl, nr)
-            test_reader = paddle.batch(
+            test_reader = paddle.fluid.io.batch(
                 test_dataset.reader,
                 batch_size=args.test_batchsize,
                 drop_last=False)
@@ -231,7 +231,7 @@ def build_program(program, startup, args, is_train=True):
     def quant_val_reader_batch():
         nl, nr, flods, flags = parse_filelist(args.test_data_dir)
         test_dataset = LFW(nl, nr)
-        test_reader = paddle.batch(
+        test_reader = paddle.fluid.io.batch(
             test_dataset.reader, batch_size=1, drop_last=False)
         shuffle_index = args.seed if args.seed else np.random.randint(1000)
         print('shuffle_index: {}'.format(shuffle_index))
@@ -347,7 +347,7 @@ def main():
             executor=exe)
     nl, nr, flods, flags = parse_filelist(args.test_data_dir)
     test_dataset = LFW(nl, nr)
-    test_reader = paddle.batch(
+    test_reader = paddle.fluid.io.batch(
         test_dataset.reader,
         batch_size=args.test_batchsize,
         drop_last=False)
diff --git a/docs/en/quick_start/distillation_tutorial_en.md b/docs/en/quick_start/distillation_tutorial_en.md
index 23ed8d2db98239b8d8d527bdcdca335ba14da759..d948f117b9fb188571b0fc07b677824ab237156e 100755
--- a/docs/en/quick_start/distillation_tutorial_en.md
+++ b/docs/en/quick_start/distillation_tutorial_en.md
@@ -100,7 +100,7 @@ The package `paddle.dataset.mnist` of Paddle define the downloading and reading
 Define training data reader and test data reader as below:
 
 ```python
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
 train_feeder = fluid.DataFeeder(['image', 'label'], fluid.CPUPlace(), student_program)
 ```
diff --git a/docs/en/quick_start/nas_tutorial_en.md b/docs/en/quick_start/nas_tutorial_en.md
index 63d695b0b611728ba932c236f7b07cd6f419b1a9..040f46530cd61c815f633fdf478b368c0d88e07e 100644
--- a/docs/en/quick_start/nas_tutorial_en.md
+++ b/docs/en/quick_start/nas_tutorial_en.md
@@ -1,11 +1,11 @@
-# Nerual Architecture Search for Image Classification 
+# Neural Architecture Search for Image Classification
 
 This tutorial shows how to use [API](../api/nas_api.md) about SANAS in PaddleSlim. We start experiment based on MobileNetV2 as example. The tutorial contains follow section.
 
 1. necessary imports
 2. initial SANAS instance
 3. define function about building program
-4. define function about input data 
+4. define function about input data
 5. define function about training
 6. define funciton about evaluation
 7. start search
@@ -52,7 +52,7 @@ def build_program(archs):
     acc_top1 = fluid.layers.accuracy(input=softmax_out, label=label, k=1)
     acc_top5 = fluid.layers.accuracy(input=softmax_out, label=label, k=5)
     test_program = fluid.default_main_program().clone(for_test=True)
-    
+
     optimizer = fluid.optimizer.Adam(learning_rate=0.1)
     optimizer.minimize(avg_cost)
 
@@ -62,13 +62,13 @@ def build_program(archs):
     return exe, train_program, test_program, (data, label), avg_cost, acc_top1, acc_top5
 ```
 
-## 4. define function about input data 
-The dataset we used is cifar10, and `paddle.dataset.cifar` in Paddle including the download and pre-read about cifar. 
+## 4. define function about input data
+The dataset we used is cifar10, and `paddle.dataset.cifar` in Paddle includes the download and pre-reading of cifar.
 ```python
 def input_data(inputs):
-    train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)
+    train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)
     train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
-    eval_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)
+    eval_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)
     eval_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
     return train_reader, train_feeder, eval_reader, eval_feeder
 ```
@@ -76,7 +76,7 @@ def input_data(inputs):
 
 ## 5. define function about training
 Start training.
 ```python
-def start_train(program, data_reader, data_feeder): 
+def start_train(program, data_reader, data_feeder):
     outputs = [avg_cost.name, acc_top1.name, acc_top5.name]
     for data in data_reader():
         batch_reward = exe.run(program, feed=data_feeder.feed(data), fetch_list = outputs)
@@ -145,7 +145,7 @@ for step in range(3):
     current_flops = slim.analysis.flops(train_program)
     if current_flops > 321208544:
         continue
-    
+
     for epoch in range(7):
         start_train(train_program, train_reader, train_feeder)
 
diff --git a/docs/en/quick_start/pruning_tutorial_en.md b/docs/en/quick_start/pruning_tutorial_en.md
index e80729455af02b635445e7a8d9d33efe79793e91..9107a38b58255f08ce3da78fa18274c05d844604 100755
--- a/docs/en/quick_start/pruning_tutorial_en.md
+++ b/docs/en/quick_start/pruning_tutorial_en.md
@@ -74,7 +74,7 @@ Define training data reader and test data reader as below:
 
 ```
 import paddle.dataset.mnist as reader
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
 train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
 ```
diff --git a/docs/en/quick_start/quant_aware_tutorial_en.md b/docs/en/quick_start/quant_aware_tutorial_en.md
index 938e85f50df7c9b78ea1d90e8b1cfe93c917b5d5..8b169294ce1ae7bde64ed59035b40f7b2f588d0b 100644
--- a/docs/en/quick_start/quant_aware_tutorial_en.md
+++ b/docs/en/quick_start/quant_aware_tutorial_en.md
@@ -41,9 +41,9 @@ To speed up training process, we select MNIST dataset to train image classificat
 
 ```python
 import paddle.dataset.mnist as reader
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
-test_reader = paddle.batch(
+test_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
 train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
 ```
diff --git a/docs/en/quick_start/quant_post_tutorial_en.md b/docs/en/quick_start/quant_post_tutorial_en.md
index a46329f365a7043661c85b42bb45904b9f74ea3f..e1e3ba4a4e08bdfbd6b79a568f3b7fb1d7fbd050 100644
--- a/docs/en/quick_start/quant_post_tutorial_en.md
+++ b/docs/en/quick_start/quant_post_tutorial_en.md
@@ -39,9 +39,9 @@ To speed up training process, we select MNIST dataset to train image classificat
 
 ```python
 import paddle.dataset.mnist as reader
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
-test_reader = paddle.batch(
+test_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
 train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
 ```
diff --git a/docs/en/tutorials/image_classification_sensitivity_analysis_tutorial_en.md b/docs/en/tutorials/image_classification_sensitivity_analysis_tutorial_en.md
index 2b084074d9c9f79cbe013252457f01f448968f5e..043e144a1f122fd9abd1598f35c688f5bc7b6f71 100644
--- a/docs/en/tutorials/image_classification_sensitivity_analysis_tutorial_en.md
+++ b/docs/en/tutorials/image_classification_sensitivity_analysis_tutorial_en.md
@@ -45,9 +45,9 @@ Show as below:
 
 ```python
 import paddle.dataset.mnist as reader
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
-test_reader = paddle.batch(
+test_reader = paddle.fluid.io.batch(
     reader.test(), batch_size=128, drop_last=True)
 data_feeder = fluid.DataFeeder(inputs, place)
 ```
diff --git a/docs/zh_cn/api_cn/pantheon_api.md b/docs/zh_cn/api_cn/pantheon_api.md
index 1de04a6451396149f09c198c206d47181d74546e..87fc67249c5dd91368e0c256cf552fb09aa48685 100644
--- a/docs/zh_cn/api_cn/pantheon_api.md
+++ b/docs/zh_cn/api_cn/pantheon_api.md
@@ -125,7 +125,7 @@ place = fluid.CPUPlace()
 exe = fluid.Executor(place)
 exe.run(startup)
 
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     paddle.dataset.cifar.train10(), batch_size=32)
 
 teacher = Teacher(out_path="example_knowledge.dat",  # offline mode
diff --git a/docs/zh_cn/api_cn/prune_api.rst b/docs/zh_cn/api_cn/prune_api.rst
index 1da4c72dfa1242dd8a7873a7a8a4309d772b74c4..540dae043d9c81dd3d9f5146cbaf318bbd4e091c 100644
--- a/docs/zh_cn/api_cn/prune_api.rst
+++ b/docs/zh_cn/api_cn/prune_api.rst
@@ -270,7 +270,7 @@ sensitivity
     exe = fluid.Executor(place)
     exe.run(startup_program)
 
-    val_reader = paddle.batch(reader.test(), batch_size=128)
+    val_reader = paddle.fluid.io.batch(reader.test(), batch_size=128)
     val_feeder = feeder = fluid.DataFeeder(
         [image, label], place, program=main_program)
diff --git a/docs/zh_cn/quick_start/distillation_tutorial.md b/docs/zh_cn/quick_start/distillation_tutorial.md
index d998e338afda7a9d607e88ef75c670d273e4cf72..a9b989760c1609cea40faad6482ef2056218db02 100755
--- a/docs/zh_cn/quick_start/distillation_tutorial.md
+++ b/docs/zh_cn/quick_start/distillation_tutorial.md
@@ -101,7 +101,7 @@ exe.run(student_startup)
 为了快速执行该示例，我们选取简单的MNIST数据，Paddle框架的`paddle.dataset.mnist`包定义了MNIST数据的下载和读取。
 代码如下：
 ```python
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
 train_feeder = fluid.DataFeeder(['image', 'label'], fluid.CPUPlace(), student_program)
 ```
diff --git a/docs/zh_cn/quick_start/nas_tutorial.md b/docs/zh_cn/quick_start/nas_tutorial.md
index e68cb7fe1b973cad2ecfd3ea2cec36bf8468fd3e..1dea7c9daa2a09b854de1a308ee8bb6fc7a7f976 100644
--- a/docs/zh_cn/quick_start/nas_tutorial.md
+++ b/docs/zh_cn/quick_start/nas_tutorial.md
@@ -67,9 +67,9 @@ def build_program(archs):
 使用的数据集为cifar10，paddle框架中`paddle.dataset.cifar`包括了cifar数据集的下载和读取，代码如下：
 ```python
 def input_data(inputs):
-    train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)
+    train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)
     train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
-    eval_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)
+    eval_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)
     eval_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
     return train_reader, train_feeder, eval_reader, eval_feeder
 ```
diff --git a/docs/zh_cn/quick_start/pruning_tutorial.md b/docs/zh_cn/quick_start/pruning_tutorial.md
index 7545fad64552d79879435ff44f8654c8064fca0c..051a740f4602f2175537efe5eaf22b62a06c10e7 100755
--- a/docs/zh_cn/quick_start/pruning_tutorial.md
+++ b/docs/zh_cn/quick_start/pruning_tutorial.md
@@ -74,7 +74,7 @@ print("FLOPs: {}".format(FLOPs))
 
 ```
 import paddle.dataset.mnist as reader
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
 train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
 ```
diff --git a/docs/zh_cn/quick_start/quant_aware_tutorial.md b/docs/zh_cn/quick_start/quant_aware_tutorial.md
index 5dfc3837aed641accf75c334aa962ae9efb75b66..5ed1e59ffd623d1877f4d65a8f3f6cc10fd7a8f1 100644
--- a/docs/zh_cn/quick_start/quant_aware_tutorial.md
+++ b/docs/zh_cn/quick_start/quant_aware_tutorial.md
@@ -42,9 +42,9 @@ exe, train_program, val_program, inputs, outputs = \
 
 ```python
 import paddle.dataset.mnist as reader
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
-test_reader = paddle.batch(
+test_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
 train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
 ```
diff --git a/docs/zh_cn/quick_start/quant_post_tutorial.md b/docs/zh_cn/quick_start/quant_post_tutorial.md
index 5904b872bd4e5f9782ab4b8affd1fdf7ac587fd9..b29a4f4054fe7a1113488aaa3cb8903bc2df946c 100755
--- a/docs/zh_cn/quick_start/quant_post_tutorial.md
+++ b/docs/zh_cn/quick_start/quant_post_tutorial.md
@@ -40,9 +40,9 @@ exe, train_program, val_program, inputs, outputs = \
 
 ```python
 import paddle.dataset.mnist as reader
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
-test_reader = paddle.batch(
+test_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
 train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
 ```
diff --git a/docs/zh_cn/tutorials/image_classification_nas_quick_start.ipynb b/docs/zh_cn/tutorials/image_classification_nas_quick_start.ipynb
index c176c262a0f1809bea56ddd4436f182c0935d056..fceccb98fbb201bd873cf34478a7f396110e427a 100644
--- a/docs/zh_cn/tutorials/image_classification_nas_quick_start.ipynb
+++ b/docs/zh_cn/tutorials/image_classification_nas_quick_start.ipynb
@@ -128,9 +128,9 @@
    "outputs": [],
    "source": [
     "def input_data(inputs):\n",
-    "    train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)\n",
+    "    train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)\n",
     "    train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())\n",
-    "    eval_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)\n",
+    "    eval_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)\n",
     "    eval_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())\n",
     "    return train_reader, train_feeder, eval_reader, eval_feeder"
    ]
diff --git a/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md b/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md
index bfa35d389e714a39e3c9574b4c94c5cd5eb32913..6bde2f8ca4615554152aacf2983c8c5d6f369ff0 100644
--- a/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md
+++ b/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md
@@ -44,9 +44,9 @@ place = fluid.CUDAPlace(0)
 
 ```python
 import paddle.dataset.mnist as reader
-train_reader = paddle.batch(
+train_reader = paddle.fluid.io.batch(
     reader.train(), batch_size=128, drop_last=True)
-test_reader = paddle.batch(
+test_reader = paddle.fluid.io.batch(
     reader.test(), batch_size=128, drop_last=True)
 data_feeder = fluid.DataFeeder(inputs, place)
 ```
diff --git a/docs/zh_cn/tutorials/sanas_darts_space.ipynb b/docs/zh_cn/tutorials/sanas_darts_space.ipynb
index 8cc43df5ca55543d58d49d04e21313045f4c75ec..658124b5ceea1b0e48fe488faffd2fbfa9b6584e 100644
--- a/docs/zh_cn/tutorials/sanas_darts_space.ipynb
+++ b/docs/zh_cn/tutorials/sanas_darts_space.ipynb
@@ -264,8 +264,8 @@
    }
   ],
   "source": [
-    "train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024), batch_size=BATCH_SIZE, drop_last=True)\n",
-    "test_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=BATCH_SIZE, drop_last=False)\n",
+    "train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024), batch_size=BATCH_SIZE, drop_last=True)\n",
+    "test_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=BATCH_SIZE, drop_last=False)\n",
     "train_loader.set_sample_list_generator(train_reader, places=place)\n",
     "test_loader.set_sample_list_generator(test_reader, places=place)"
    ]
diff --git a/docs/zh_cn/tutorials/sanas_darts_space.md b/docs/zh_cn/tutorials/sanas_darts_space.md
index 939ded2bfba743e265259a79b7bb1dfb079e70c7..b280db4ef88c5e2e63e398379f771976c03a3d7b 100644
--- a/docs/zh_cn/tutorials/sanas_darts_space.md
+++ b/docs/zh_cn/tutorials/sanas_darts_space.md
@@ -236,8 +236,8 @@ exe.run(startup_program)
 **注意:**本示例为了简化代码直接调用`paddle.dataset.cifar10`定义训练数据和预测数据，实际训练需要使用自定义cifar10文件中的reader。
 
 ```python
-train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024), batch_size=BATCH_SIZE, drop_last=True)
-test_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=BATCH_SIZE, drop_last=False)
+train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024), batch_size=BATCH_SIZE, drop_last=True)
+test_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=BATCH_SIZE, drop_last=False)
 train_loader.set_sample_list_generator(train_reader, places=place)
 test_loader.set_sample_list_generator(test_reader, places=place)
 ```
diff --git a/tests/test_quant_aware.py b/tests/test_quant_aware.py
index 16c93fe15eda513d2c98a99243208c89aee3b3b4..f0a8464f7e688625ff87279ce3fb030f7ba9cff3 100644
--- a/tests/test_quant_aware.py
+++ b/tests/test_quant_aware.py
@@ -112,9 +112,10 @@ class TestQuantAwareCase2(unittest.TestCase):
         exe = fluid.Executor(place)
         exe.run(fluid.default_startup_program())
         feeder = fluid.DataFeeder([image, label], place, program=main_prog)
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             paddle.dataset.mnist.train(), batch_size=64)
-        eval_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)
+        eval_reader = paddle.fluid.io.batch(
+            paddle.dataset.mnist.test(), batch_size=64)
 
         def train(program):
             iter = 0
diff --git a/tests/test_quant_post.py b/tests/test_quant_post.py
index 17068410109f4651f43382a603b1829eb5769413..2279d5fb7549ea72f41efa6d5662634bcdeafe9b 100644
--- a/tests/test_quant_post.py
+++ b/tests/test_quant_post.py
@@ -50,9 +50,10 @@ class TestQuantAwareCase1(unittest.TestCase):
         exe = fluid.Executor(place)
         exe.run(fluid.default_startup_program())
         feeder = fluid.DataFeeder([image, label], place, program=main_prog)
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             paddle.dataset.mnist.train(), batch_size=64)
-        eval_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)
+        eval_reader = paddle.fluid.io.batch(
+            paddle.dataset.mnist.test(), batch_size=64)
 
         def train(program):
             iter = 0
diff --git a/tests/test_quant_post_only_weight.py b/tests/test_quant_post_only_weight.py
index 0a09aa0650756325795ebc522c52e926935bc00e..bbb1a00bd6fd0178142968ce0422161217f827bf 100644
--- a/tests/test_quant_post_only_weight.py
+++ b/tests/test_quant_post_only_weight.py
@@ -50,9 +50,10 @@ class TestQuantPostOnlyWeightCase1(unittest.TestCase):
         exe = fluid.Executor(place)
         exe.run(fluid.default_startup_program())
         feeder = fluid.DataFeeder([image, label], place, program=main_prog)
-        train_reader = paddle.batch(
+        train_reader = paddle.fluid.io.batch(
             paddle.dataset.mnist.train(), batch_size=64)
-        eval_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)
+        eval_reader = paddle.fluid.io.batch(
+            paddle.dataset.mnist.test(), batch_size=64)
 
         def train(program):
             iter = 0
diff --git a/tests/test_sensitivity.py b/tests/test_sensitivity.py
index e2cfa01d889db2891fd7507b2d4d9aec018a1163..648c09f2a96067c89826f462e7fe955eb1b9b7e8 100644
--- a/tests/test_sensitivity.py
+++ b/tests/test_sensitivity.py
@@ -44,7 +44,8 @@ class TestSensitivity(unittest.TestCase):
         exe = fluid.Executor(place)
         exe.run(startup_program)
 
-        val_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128)
+        val_reader = paddle.fluid.io.batch(
+            paddle.dataset.mnist.test(), batch_size=128)
 
         def eval_func(program, scope):
             feeder = fluid.DataFeeder(