Unverified commit 432639d7 authored by whs, committed by GitHub

Rename paddle.batch to paddle.io.batch (#290)

Parent 567e90e4
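Every hunk below applies the same mechanical change: calls to the deprecated top-level `paddle.batch` are rewritten as `paddle.fluid.io.batch`, with arguments unchanged. A minimal before/after sketch (the MNIST reader is illustrative, not part of this diff):

```python
import paddle
import paddle.fluid  # registers the fluid submodule so paddle.fluid.io is reachable
import paddle.dataset.mnist as reader

# Before this commit (deprecated alias):
# train_reader = paddle.batch(reader.train(), batch_size=128, drop_last=True)

# After this commit (same signature, new path):
train_reader = paddle.fluid.io.batch(reader.train(), batch_size=128, drop_last=True)
```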
......@@ -116,8 +116,8 @@ def compress(args):
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.batch(
val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.fluid.io.batch(
train_reader, batch_size=args.batch_size, drop_last=True)
train_feeder = feeder = fluid.DataFeeder([image, label], place)
......
......@@ -34,12 +34,12 @@ add_arg('config_file', str, None, "The config file for comp
model_list = [m for m in dir(models) if "__" not in m]
ratiolist = [
# [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
# [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
]
def save_model(args, exe, train_prog, eval_prog,info):
def save_model(args, exe, train_prog, eval_prog, info):
model_path = os.path.join(args.model_save_dir, args.model, str(info))
if not os.path.isdir(model_path):
os.makedirs(model_path)
......@@ -58,29 +58,31 @@ def piecewise_decay(args):
regularization=fluid.regularizer.L2Decay(args.l2_decay))
return optimizer
def cosine_decay(args):
step = int(math.ceil(float(args.total_images) / args.batch_size))
learning_rate = fluid.layers.cosine_decay(
learning_rate=args.lr,
step_each_epoch=step,
epochs=args.num_epochs)
learning_rate=args.lr, step_each_epoch=step, epochs=args.num_epochs)
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
momentum=args.momentum_rate,
regularization=fluid.regularizer.L2Decay(args.l2_decay))
return optimizer
def create_optimizer(args):
if args.lr_strategy == "piecewise_decay":
return piecewise_decay(args)
elif args.lr_strategy == "cosine_decay":
return cosine_decay(args)
def compress(args):
class_dim=1000
image_shape="3,224,224"
class_dim = 1000
image_shape = "3,224,224"
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list)
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# model definition
......@@ -98,18 +100,22 @@ def compress(args):
exe.run(fluid.default_startup_program())
if args.pretrained_model:
def if_exist(var):
exist = os.path.exists(os.path.join(args.pretrained_model, var.name))
print("exist",exist)
exist = os.path.exists(
os.path.join(args.pretrained_model, var.name))
print("exist", exist)
return exist
#fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
train_reader = paddle.batch(
val_reader = paddle.fluid.io.batch(reader.val(), batch_size=args.batch_size)
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=args.batch_size, drop_last=True)
train_feeder = feeder = fluid.DataFeeder([image, label], place)
val_feeder = feeder = fluid.DataFeeder([image, label], place, program=val_program)
val_feeder = feeder = fluid.DataFeeder(
[image, label], place, program=val_program)
def test(epoch, program):
batch_id = 0
......@@ -117,80 +123,99 @@ def compress(args):
acc_top5_ns = []
for data in val_reader():
start_time = time.time()
acc_top1_n, acc_top5_n = exe.run(program,
feed=train_feeder.feed(data),
fetch_list=[acc_top1.name, acc_top5.name])
acc_top1_n, acc_top5_n = exe.run(
program,
feed=train_feeder.feed(data),
fetch_list=[acc_top1.name, acc_top5.name])
end_time = time.time()
print("Eval epoch[{}] batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".format(epoch, batch_id, np.mean(acc_top1_n), np.mean(acc_top5_n), end_time-start_time))
print(
"Eval epoch[{}] batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".
format(epoch, batch_id,
np.mean(acc_top1_n),
np.mean(acc_top5_n), end_time - start_time))
acc_top1_ns.append(np.mean(acc_top1_n))
acc_top5_ns.append(np.mean(acc_top5_n))
batch_id += 1
print("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(epoch, np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
print("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(
epoch,
np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
def train(epoch, program):
build_strategy = fluid.BuildStrategy()
exec_strategy = fluid.ExecutionStrategy()
train_program = fluid.compiler.CompiledProgram(
program).with_data_parallel(
loss_name=avg_cost.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
program).with_data_parallel(
loss_name=avg_cost.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
batch_id = 0
for data in train_reader():
start_time = time.time()
loss_n, acc_top1_n, acc_top5_n,lr_n = exe.run(train_program,
feed=train_feeder.feed(data),
fetch_list=[avg_cost.name, acc_top1.name, acc_top5.name,"learning_rate"])
loss_n, acc_top1_n, acc_top5_n, lr_n = exe.run(
train_program,
feed=train_feeder.feed(data),
fetch_list=[
avg_cost.name, acc_top1.name, acc_top5.name,
"learning_rate"
])
end_time = time.time()
loss_n = np.mean(loss_n)
acc_top1_n = np.mean(acc_top1_n)
acc_top5_n = np.mean(acc_top5_n)
lr_n = np.mean(lr_n)
print("epoch[{}]-batch[{}] - loss: {}; acc_top1: {}; acc_top5: {};lrn: {}; time: {}".format(epoch, batch_id, loss_n, acc_top1_n, acc_top5_n, lr_n,end_time-start_time))
print(
"epoch[{}]-batch[{}] - loss: {}; acc_top1: {}; acc_top5: {};lrn: {}; time: {}".
format(epoch, batch_id, loss_n, acc_top1_n, acc_top5_n, lr_n,
end_time - start_time))
batch_id += 1
params = []
for param in fluid.default_main_program().global_block().all_parameters():
#if "_weights" in param.name and "conv1_weights" not in param.name:
if "_sep_weights" in param.name:
if "_sep_weights" in param.name:
params.append(param.name)
print("fops before pruning: {}".format(flops(fluid.default_main_program())))
print("fops before pruning: {}".format(
flops(fluid.default_main_program())))
pruned_program_iter = fluid.default_main_program()
pruned_val_program_iter = val_program
for ratios in ratiolist:
pruner = Pruner()
pruned_val_program_iter = pruner.prune(pruned_val_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place,
only_graph=True)
pruned_program_iter = pruner.prune(pruned_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place)
pruned_val_program_iter = pruner.prune(
pruned_val_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place,
only_graph=True)
pruned_program_iter = pruner.prune(
pruned_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place)
print("fops after pruning: {}".format(flops(pruned_program_iter)))
""" do not inherit learning rate """
if(os.path.exists(args.pretrained_model + "/learning_rate")):
os.remove( args.pretrained_model + "/learning_rate")
if(os.path.exists(args.pretrained_model + "/@LR_DECAY_COUNTER@")):
os.remove( args.pretrained_model + "/@LR_DECAY_COUNTER@")
fluid.io.load_vars(exe, args.pretrained_model , main_program = pruned_program_iter, predicate=if_exist)
if (os.path.exists(args.pretrained_model + "/learning_rate")):
os.remove(args.pretrained_model + "/learning_rate")
if (os.path.exists(args.pretrained_model + "/@LR_DECAY_COUNTER@")):
os.remove(args.pretrained_model + "/@LR_DECAY_COUNTER@")
fluid.io.load_vars(
exe,
args.pretrained_model,
main_program=pruned_program_iter,
predicate=if_exist)
pruned_program = pruned_program_iter
pruned_val_program = pruned_val_program_iter
for i in range(args.num_epochs):
train(i, pruned_program)
test(i, pruned_val_program)
save_model(args,exe,pruned_program,pruned_val_program,i)
save_model(args, exe, pruned_program, pruned_val_program, i)
def main():
args = parser.parse_args()
......
......@@ -41,9 +41,10 @@ add_arg('test_period', int, 10, "Test period in epoches.")
model_list = [m for m in dir(models) if "__" not in m]
ratiolist = [
# [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
# [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
]
def piecewise_decay(args):
step = int(math.ceil(float(args.total_images) / args.batch_size))
......@@ -121,8 +122,8 @@ def compress(args):
# fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.batch(
val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.fluid.io.batch(
train_reader, batch_size=args.batch_size, drop_last=True)
train_feeder = feeder = fluid.DataFeeder([image, label], place)
......@@ -194,21 +195,26 @@ def compress(args):
for ratios in ratiolist:
pruner = Pruner()
pruned_val_program_iter = pruner.prune(pruned_val_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place,
only_graph=True)
pruned_program_iter = pruner.prune(pruned_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place)
pruned_val_program_iter = pruner.prune(
pruned_val_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place,
only_graph=True)
pruned_program_iter = pruner.prune(
pruned_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place)
print("fops after pruning: {}".format(flops(pruned_program_iter)))
fluid.io.load_vars(exe, args.pretrained_model , main_program = pruned_program_iter, predicate=if_exist)
fluid.io.load_vars(
exe,
args.pretrained_model,
main_program=pruned_program_iter,
predicate=if_exist)
pruner = AutoPruner(
pruned_val_program_iter,
......@@ -238,8 +244,6 @@ def compress(args):
pruner.reward(score)
def main():
args = parser.parse_args()
print_arguments(args)
......
......@@ -133,9 +133,9 @@ def compress(args):
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
train_reader, batch_size=args.batch_size, drop_last=True)
val_reader = paddle.batch(
val_reader = paddle.fluid.io.batch(
val_reader, batch_size=args.batch_size, drop_last=True)
val_program = student_program.clone(for_test=True)
......
......@@ -165,7 +165,7 @@
"metadata": {},
"outputs": [],
"source": [
"train_reader = paddle.batch(\n",
"train_reader = paddle.fluid.io.batch(\n",
" paddle.dataset.mnist.train(), batch_size=128, drop_last=True)\n",
"train_feeder = fluid.DataFeeder(['image', 'label'], fluid.CPUPlace(), student_program)"
]
......
......@@ -137,22 +137,22 @@ def search_mobilenetv2_block(config, args, image_size):
exe.run(startup_program)
if args.data == 'cifar10':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
paddle.dataset.cifar.test10(cycle=False),
batch_size=args.batch_size,
drop_last=False)
elif args.data == 'imagenet':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
imagenet_reader.train(),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
imagenet_reader.val(),
batch_size=args.batch_size,
drop_last=False)
......
......@@ -114,9 +114,9 @@
" if current_flops > 321208544:\n",
" continue\n",
" \n",
" train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)\n",
" train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)\n",
" train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())\n",
" test_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False),\n",
" test_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False),\n",
" batch_size=256)\n",
" test_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())\n",
"\n",
......@@ -160,4 +160,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
\ No newline at end of file
}
......@@ -105,22 +105,22 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
exe.run(startup_program)
if args.data == 'cifar10':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
paddle.dataset.cifar.test10(cycle=False),
batch_size=args.batch_size,
drop_last=False)
elif args.data == 'imagenet':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
imagenet_reader.train(),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
imagenet_reader.val(),
batch_size=args.batch_size,
drop_last=False)
......
......@@ -109,22 +109,22 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
exe.run(startup_program)
if args.data == 'cifar10':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
paddle.dataset.cifar.test10(cycle=False),
batch_size=args.batch_size,
drop_last=False)
elif args.data == 'imagenet':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
imagenet_reader.train(),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
imagenet_reader.val(),
batch_size=args.batch_size,
drop_last=False)
......
......@@ -102,22 +102,22 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
exe.run(startup_program)
if args.data == 'cifar10':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
paddle.dataset.cifar.test10(cycle=False),
batch_size=args.batch_size,
drop_last=False)
elif args.data == 'imagenet':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
imagenet_reader.train(),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
imagenet_reader.val(),
batch_size=args.batch_size,
drop_last=False)
......@@ -197,22 +197,22 @@ def test_search_result(tokens, image_size, args, config):
exe.run(startup_program)
if args.data == 'cifar10':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
paddle.dataset.cifar.test10(cycle=False),
batch_size=args.batch_size,
drop_last=False)
elif args.data == 'imagenet':
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
imagenet_reader.train(),
batch_size=args.batch_size,
drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
imagenet_reader.val(), batch_size=args.batch_size, drop_last=False)
train_loader.set_sample_list_generator(
......
......@@ -113,7 +113,7 @@ def test_mnist(model, tokens=None):
acc_set = []
avg_loss_set = []
batch_size = 64
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.test(), batch_size=batch_size, drop_last=True)
for batch_id, data in enumerate(test_reader()):
dy_x_data = np.array([x[0].reshape(1, 28, 28)
......@@ -145,7 +145,7 @@ def train_mnist(args, model, tokens=None):
adam = AdamOptimizer(
learning_rate=0.001, parameter_list=model.parameters())
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True)
if args.use_data_parallel:
train_reader = fluid.contrib.reader.distributed_batch_reader(
......
......@@ -63,7 +63,7 @@ def eval(args):
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
val_feeder = feeder = fluid.DataFeeder(
[image, label], place, program=val_program)
......
......@@ -161,7 +161,7 @@
"outputs": [],
"source": [
"import paddle.dataset.mnist as reader\n",
"train_reader = paddle.batch(\n",
"train_reader = paddle.fluid.io.batch(\n",
" reader.train(), batch_size=128, drop_last=True)\n",
"train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())"
]
......
......@@ -142,8 +142,8 @@ def compress(args):
args.pretrained_model))
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.batch(
val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.fluid.io.batch(
train_reader, batch_size=args.batch_size, drop_last=True)
train_feeder = feeder = fluid.DataFeeder([image, label], place)
......
......@@ -81,9 +81,9 @@
"outputs": [],
"source": [
"import paddle.dataset.mnist as reader\n",
"train_reader = paddle.batch(\n",
"train_reader = paddle.fluid.io.batch(\n",
" reader.train(), batch_size=128, drop_last=True)\n",
"test_reader = paddle.batch(\n",
"test_reader = paddle.fluid.io.batch(\n",
" reader.train(), batch_size=128, drop_last=True)\n",
"train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())"
]
......
......@@ -159,8 +159,8 @@ def compress(args):
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.batch(
val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.fluid.io.batch(
train_reader, batch_size=args.batch_size, drop_last=True)
train_feeder = feeder = fluid.DataFeeder([image, label], place)
......
......@@ -46,7 +46,7 @@ def eval(args):
exe,
model_filename=args.model_name,
params_filename=args.params_name)
val_reader = paddle.batch(reader.val(), batch_size=128)
val_reader = paddle.fluid.io.batch(reader.val(), batch_size=128)
feeder = fluid.DataFeeder(
place=place, feed_list=feed_target_names, program=val_program)
......
......@@ -79,9 +79,9 @@
"outputs": [],
"source": [
"import paddle.dataset.mnist as reader\n",
"train_reader = paddle.batch(\n",
"train_reader = paddle.fluid.io.batch(\n",
" reader.train(), batch_size=128, drop_last=True)\n",
"test_reader = paddle.batch(\n",
"test_reader = paddle.fluid.io.batch(\n",
" reader.train(), batch_size=128, drop_last=True)\n",
"train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())"
]
......
......@@ -73,9 +73,9 @@
"outputs": [],
"source": [
"import paddle.dataset.mnist as reader\n",
"train_reader = paddle.batch(\n",
"train_reader = paddle.fluid.io.batch(\n",
" reader.train(), batch_size=128, drop_last=True)\n",
"test_reader = paddle.batch(\n",
"test_reader = paddle.fluid.io.batch(\n",
" reader.test(), batch_size=128, drop_last=True)\n",
"data_feeder = fluid.DataFeeder(inputs, place)"
]
......
......@@ -68,7 +68,7 @@ def compress(args):
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
val_feeder = feeder = fluid.DataFeeder(
[image, label], place, program=val_program)
......
......@@ -119,8 +119,8 @@ def compress(args):
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.batch(
val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.fluid.io.batch(
train_reader, batch_size=args.batch_size, drop_last=True)
train_feeder = feeder = fluid.DataFeeder([image, label], place)
......
......@@ -117,8 +117,8 @@ def compress(args):
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.batch(
val_reader = paddle.fluid.io.batch(val_reader, batch_size=args.batch_size)
train_reader = paddle.fluid.io.batch(
train_reader, batch_size=args.batch_size, drop_last=True)
train_feeder = feeder = fluid.DataFeeder([image, label], place)
......
......@@ -79,7 +79,7 @@ if __name__ == '__main__':
dataset = CASIA_Face(root=data_dir)
print(len(dataset))
print(dataset.class_nums)
trainloader = paddle.batch(
trainloader = paddle.fluid.io.batch(
dataset.reader, batch_size=1, drop_last=False)
for i in range(10):
for data in trainloader():
......
......@@ -159,7 +159,7 @@ if __name__ == "__main__":
train_dataset = CASIA_Face(root=args.train_data_dir)
nl, nr, flods, flags = parse_filelist(args.test_data_dir)
test_dataset = LFW(nl, nr)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
test_dataset.reader,
batch_size=args.test_batchsize,
drop_last=False)
......
......@@ -166,7 +166,7 @@ def build_program(program, startup, args, is_train=True):
image = fluid.data(
name='image', shape=[-1, 3, 112, 96], dtype='float32')
label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
train_dataset.reader,
batch_size=args.train_batchsize // num_trainers,
drop_last=False)
......@@ -187,7 +187,7 @@ def build_program(program, startup, args, is_train=True):
else:
nl, nr, flods, flags = parse_filelist(args.test_data_dir)
test_dataset = LFW(nl, nr)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
test_dataset.reader,
batch_size=args.test_batchsize,
drop_last=False)
......@@ -231,7 +231,7 @@ def build_program(program, startup, args, is_train=True):
def quant_val_reader_batch():
nl, nr, flods, flags = parse_filelist(args.test_data_dir)
test_dataset = LFW(nl, nr)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
test_dataset.reader, batch_size=1, drop_last=False)
shuffle_index = args.seed if args.seed else np.random.randint(1000)
print('shuffle_index: {}'.format(shuffle_index))
......@@ -347,7 +347,7 @@ def main():
executor=exe)
nl, nr, flods, flags = parse_filelist(args.test_data_dir)
test_dataset = LFW(nl, nr)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
test_dataset.reader,
batch_size=args.test_batchsize,
drop_last=False)
......
......@@ -100,7 +100,7 @@ The package `paddle.dataset.mnist` of Paddle define the downloading and reading
Define training data reader and test data reader as below:
```python
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
train_feeder = fluid.DataFeeder(['image', 'label'], fluid.CPUPlace(), student_program)
```
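For orientation, a minimal sketch of how these two objects are consumed together; it assumes an executor `exe` and a distillation loss variable `avg_cost` from elsewhere in the tutorial, which this excerpt does not define:

```python
# Each `data` is a list of (image, label) samples of length batch_size;
# the feeder turns it into the feed dict that student_program expects.
for data in train_reader():
    loss = exe.run(student_program,
                   feed=train_feeder.feed(data),
                   fetch_list=[avg_cost.name])
```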
......
# Neural Architecture Search for Image Classification
This tutorial shows how to use the SANAS [API](../api/nas_api.md) in PaddleSlim. We run the experiment on MobileNetV2 as an example. The tutorial contains the following sections.
1. necessary imports
2. initial SANAS instance
3. define function about building program
4. define function about input data
5. define function about training
6. define function about evaluation
7. start search
......@@ -52,7 +52,7 @@ def build_program(archs):
acc_top1 = fluid.layers.accuracy(input=softmax_out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=softmax_out, label=label, k=5)
test_program = fluid.default_main_program().clone(for_test=True)
optimizer = fluid.optimizer.Adam(learning_rate=0.1)
optimizer.minimize(avg_cost)
......@@ -62,13 +62,13 @@ def build_program(archs):
return exe, train_program, test_program, (data, label), avg_cost, acc_top1, acc_top5
```
## 4. define function about input data
The dataset we use is cifar10; the `paddle.dataset.cifar` package in Paddle handles the downloading and reading of cifar.
```python
def input_data(inputs):
train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)
train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)
train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
eval_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)
eval_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)
eval_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
return train_reader, train_feeder, eval_reader, eval_feeder
```
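As a usage note, the tuple returned above is unpacked once and shared by the training and evaluation loops (`inputs` is the `(data, label)` pair built by `build_program` in section 3):

```python
train_reader, train_feeder, eval_reader, eval_feeder = input_data(inputs)
```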
......@@ -76,7 +76,7 @@ def input_data(inputs):
## 5. define function about training
Start training.
```python
def start_train(program, data_reader, data_feeder):
outputs = [avg_cost.name, acc_top1.name, acc_top5.name]
for data in data_reader():
batch_reward = exe.run(program, feed=data_feeder.feed(data), fetch_list = outputs)
......@@ -145,7 +145,7 @@ for step in range(3):
current_flops = slim.analysis.flops(train_program)
if current_flops > 321208544:
continue
for epoch in range(7):
start_train(train_program, train_reader, train_feeder)
......
......@@ -74,7 +74,7 @@ Define training data reader and test data reader as below:
```
import paddle.dataset.mnist as reader
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
```
......
......@@ -41,9 +41,9 @@ To speed up training process, we select MNIST dataset to train image classificat
```python
import paddle.dataset.mnist as reader
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
```
......
......@@ -39,9 +39,10 @@ To speed up training process, we select MNIST dataset to train image classificat
```python
import paddle.dataset.mnist as reader
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
```
......
......@@ -45,9 +45,9 @@ Show as below:
```python
import paddle.dataset.mnist as reader
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
reader.test(), batch_size=128, drop_last=True)
data_feeder = fluid.DataFeeder(inputs, place)
```
......
......@@ -125,7 +125,7 @@ place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup)
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.dataset.cifar.train10(), batch_size=32)
teacher = Teacher(out_path="example_knowledge.dat", # offline mode
......
......@@ -270,7 +270,7 @@ sensitivity
exe = fluid.Executor(place)
exe.run(startup_program)
val_reader = paddle.batch(reader.test(), batch_size=128)
val_reader = paddle.fluid.io.batch(reader.test(), batch_size=128)
val_feeder = feeder = fluid.DataFeeder(
[image, label], place, program=main_program)
......
......@@ -101,7 +101,7 @@ exe.run(student_startup)
To run this example quickly, we use the simple MNIST dataset; the `paddle.dataset.mnist` package in the Paddle framework defines the downloading and reading of MNIST data. The code is as follows:
```python
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
train_feeder = fluid.DataFeeder(['image', 'label'], fluid.CPUPlace(), student_program)
```
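Before training, a quick sanity check of the reader can help; a sketch, assuming `paddle.dataset.mnist` yields flattened 28x28 images as in standard Paddle 1.x:

```python
data = next(train_reader())      # one mini-batch: a list of 128 (image, label) tuples
image, label = data[0]
print(len(data), image.shape)    # expected: 128 (784,)
```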
......
......@@ -67,9 +67,9 @@ def build_program(archs):
The dataset used is cifar10; the `paddle.dataset.cifar` package in the Paddle framework includes the downloading and reading of the cifar dataset. The code is as follows:
```python
def input_data(inputs):
train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)
train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)
train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
eval_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)
eval_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)
eval_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
return train_reader, train_feeder, eval_reader, eval_feeder
```
......
......@@ -74,7 +74,7 @@ print("FLOPs: {}".format(FLOPs))
```
import paddle.dataset.mnist as reader
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
```
......
......@@ -42,9 +42,9 @@ exe, train_program, val_program, inputs, outputs = \
```python
import paddle.dataset.mnist as reader
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
```
......
......@@ -40,9 +40,9 @@ exe, train_program, val_program, inputs, outputs = \
```python
import paddle.dataset.mnist as reader
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())
```
......
......@@ -128,9 +128,9 @@
"outputs": [],
"source": [
"def input_data(inputs):\n",
" train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)\n",
" train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),batch_size=256)\n",
" train_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())\n",
" eval_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)\n",
" eval_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=256)\n",
" eval_feeder = fluid.DataFeeder(inputs, fluid.CPUPlace())\n",
" return train_reader, train_feeder, eval_reader, eval_feeder"
]
......
......@@ -44,9 +44,9 @@ place = fluid.CUDAPlace(0)
```python
import paddle.dataset.mnist as reader
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
reader.train(), batch_size=128, drop_last=True)
test_reader = paddle.batch(
test_reader = paddle.fluid.io.batch(
reader.test(), batch_size=128, drop_last=True)
data_feeder = fluid.DataFeeder(inputs, place)
```
......
......@@ -264,8 +264,8 @@
}
],
"source": [
"train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024), batch_size=BATCH_SIZE, drop_last=True)\n",
"test_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=BATCH_SIZE, drop_last=False)\n",
"train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024), batch_size=BATCH_SIZE, drop_last=True)\n",
"test_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=BATCH_SIZE, drop_last=False)\n",
"train_loader.set_sample_list_generator(train_reader, places=place)\n",
"test_loader.set_sample_list_generator(test_reader, places=place)"
]
......
......@@ -236,8 +236,8 @@ exe.run(startup_program)
**Note:** To keep the code simple, this example calls `paddle.dataset.cifar10` directly to define the training and test data; real training should use the reader defined in a custom cifar10 file.
```python
train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024), batch_size=BATCH_SIZE, drop_last=True)
test_reader = paddle.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=BATCH_SIZE, drop_last=False)
train_reader = paddle.fluid.io.batch(paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024), batch_size=BATCH_SIZE, drop_last=True)
test_reader = paddle.fluid.io.batch(paddle.dataset.cifar.test10(cycle=False), batch_size=BATCH_SIZE, drop_last=False)
train_loader.set_sample_list_generator(train_reader, places=place)
test_loader.set_sample_list_generator(test_reader, places=place)
```
......
......@@ -112,9 +112,10 @@ class TestQuantAwareCase2(unittest.TestCase):
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
feeder = fluid.DataFeeder([image, label], place, program=main_prog)
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.train(), batch_size=64)
eval_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)
eval_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.test(), batch_size=64)
def train(program):
iter = 0
......
......@@ -50,9 +50,10 @@ class TestQuantAwareCase1(unittest.TestCase):
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
feeder = fluid.DataFeeder([image, label], place, program=main_prog)
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.train(), batch_size=64)
eval_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)
eval_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.test(), batch_size=64)
def train(program):
iter = 0
......
......@@ -50,9 +50,10 @@ class TestQuantPostOnlyWeightCase1(unittest.TestCase):
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
feeder = fluid.DataFeeder([image, label], place, program=main_prog)
train_reader = paddle.batch(
train_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.train(), batch_size=64)
eval_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)
eval_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.test(), batch_size=64)
def train(program):
iter = 0
......
......@@ -44,7 +44,8 @@ class TestSensitivity(unittest.TestCase):
exe = fluid.Executor(place)
exe.run(startup_program)
val_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128)
val_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.test(), batch_size=128)
def eval_func(program, scope):
feeder = fluid.DataFeeder(
......