diff --git a/benchmark/paddle/image/alexnet.py b/benchmark/paddle/image/alexnet.py
index b0beef8ca71fb6ea323994e1243b20888876e3f0..77d130ae34059d1e87040d00346ac1dadd86b0d8 100644
--- a/benchmark/paddle/image/alexnet.py
+++ b/benchmark/paddle/image/alexnet.py
@@ -6,7 +6,7 @@ height = 227
 width = 227
 num_class = 1000
 batch_size = get_config_arg('batch_size', int, 128)
-use_mkldnn = get_config_arg('use_mkldnn', bool, False)
+gp = get_config_arg('layer_num', int, 1)
 is_infer = get_config_arg("is_infer", bool, False)
 num_samples = get_config_arg('num_samples', int, 2560)
 
@@ -41,12 +41,7 @@ net = img_pool_layer(input=net, pool_size=3, stride=2)
 
 # conv2
 net = img_conv_layer(
-    input=net,
-    filter_size=5,
-    num_filters=256,
-    stride=1,
-    padding=2,
-    groups=2 if use_mkldnn else 1)
+    input=net, filter_size=5, num_filters=256, stride=1, padding=2, groups=gp)
 net = img_cmrnorm_layer(input=net, size=5, scale=0.0001, power=0.75)
 net = img_pool_layer(input=net, pool_size=3, stride=2)
 
@@ -55,21 +50,11 @@ net = img_conv_layer(
     input=net, filter_size=3, num_filters=384, stride=1, padding=1)
 # conv4
 net = img_conv_layer(
-    input=net,
-    filter_size=3,
-    num_filters=384,
-    stride=1,
-    padding=1,
-    groups=2 if use_mkldnn else 1)
+    input=net, filter_size=3, num_filters=384, stride=1, padding=1, groups=gp)
 
 # conv5
 net = img_conv_layer(
-    input=net,
-    filter_size=3,
-    num_filters=256,
-    stride=1,
-    padding=1,
-    groups=2 if use_mkldnn else 1)
+    input=net, filter_size=3, num_filters=256, stride=1, padding=1, groups=gp)
 net = img_pool_layer(input=net, pool_size=3, stride=2)
 
 net = fc_layer(
@@ -84,6 +69,9 @@ net = fc_layer(
     layer_attr=ExtraAttr(drop_rate=0.5))
 net = fc_layer(input=net, size=1000, act=SoftmaxActivation())
 
-lab = data_layer('label', num_class)
-loss = cross_entropy(input=net, label=lab)
-outputs(loss)
+if is_infer:
+    outputs(net)
+else:
+    lab = data_layer('label', num_class)
+    loss = cross_entropy(input=net, label=lab)
+    outputs(loss)
diff --git a/benchmark/paddle/image/run_mkl_infer.sh b/benchmark/paddle/image/run_mkl_infer.sh
index 00942e32a558087b891c984507ef99b48228fed6..a3b5e2db5e9b3fec906bce07b7450f19fcd3f9f6 100755
--- a/benchmark/paddle/image/run_mkl_infer.sh
+++ b/benchmark/paddle/image/run_mkl_infer.sh
@@ -79,7 +79,7 @@ fi
 # inference benchmark
 for use_mkldnn in True False; do
   for batchsize in 1 2 4 8 16; do
-    infer alexnet group2 $batchsize $use_mkldnn
+    infer alexnet 2 $batchsize $use_mkldnn
     infer googlenet v1 $batchsize $use_mkldnn
     infer resnet 50 $batchsize $use_mkldnn
     infer vgg 19 $batchsize $use_mkldnn
diff --git a/benchmark/paddle/image/run_mkl_train.sh b/benchmark/paddle/image/run_mkl_train.sh
index c38b3e3621e97ccacbe5735be170ec8cfc795987..03d2d378fb72e36f765d89af788f6ee96fe21d4e 100755
--- a/benchmark/paddle/image/run_mkl_train.sh
+++ b/benchmark/paddle/image/run_mkl_train.sh
@@ -47,6 +47,6 @@ for use_mkldnn in True False; do
     train vgg 19 $batchsize $use_mkldnn
     train resnet 50 $batchsize $use_mkldnn
     train googlenet v1 $batchsize $use_mkldnn
-    train alexnet group2 $batchsize $use_mkldnn
+    train alexnet 2 $batchsize $use_mkldnn
   done
 done
diff --git a/benchmark/paddle/image/run_openblas_infer.sh b/benchmark/paddle/image/run_openblas_infer.sh
index 3dad42ee0dfac50bbed93a474348c45b856e9c3d..ec9235e2c27691973192561ad1dcf10977a0cfaa 100755
--- a/benchmark/paddle/image/run_openblas_infer.sh
+++ b/benchmark/paddle/image/run_openblas_infer.sh
@@ -56,7 +56,7 @@ fi
 
 # inference benchmark
 for batchsize in 1 2 4 8 16; do
-    infer alexnet group2 $batchsize $use_mkldnn
+    infer alexnet 2 $batchsize $use_mkldnn
     infer googlenet v1 $batchsize
     infer resnet 50 $batchsize
     infer vgg 19 $batchsize
diff --git a/benchmark/paddle/image/run_openblas_train.sh b/benchmark/paddle/image/run_openblas_train.sh
index caea5548c3bc993330e99c827a3b0c38562b8031..1e007be9663cdf34ca50245b0c15348628670d10 100755
--- a/benchmark/paddle/image/run_openblas_train.sh
+++ b/benchmark/paddle/image/run_openblas_train.sh
@@ -36,5 +36,5 @@ for batchsize in 64 128 256; do
     train vgg 19 $batchsize
     train resnet 50 $batchsize
     train googlenet v1 $batchsize
-    train alexnet group2 $batchsize $use_mkldnn
+    train alexnet 2 $batchsize $use_mkldnn
 done