Commit 08bbc31d authored by Luo Tao

reorganize the catalog

Parent df2ba16e
@@ -34,7 +34,7 @@
- id: convert-markdown-into-html
name: convert-markdown-into-html
description: Convert README.md into index.html and README.en.md into index.en.html
entry: python pre-commit-hooks/convert_markdown_into_html.py
entry: python .pre-commit-hooks/convert_markdown_into_html.py
language: system
files: .+README(\.en)?\.md$
@@ -19,7 +19,7 @@ before_install:
- pip install -U virtualenv pre-commit pip
- GOPATH=/tmp/go go get -u github.com/wangkuiyi/ipynb/markdown-to-ipynb
script:
- PATH=/tmp/go/bin:$PATH travis/precommit.sh
- PATH=/tmp/go/bin:$PATH .travis/precommit.sh
notifications:
email:
on_success: change
This diff is collapsed.
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import cPickle
import numpy as np
from PIL import Image
from optparse import OptionParser
import paddle.utils.image_util as image_util
from py_paddle import swig_paddle, DataProviderConverter
from paddle.trainer.PyDataProvider2 import dense_vector
from paddle.trainer.config_parser import parse_config
import logging
logging.basicConfig(
format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
logging.getLogger().setLevel(logging.INFO)
def vis_square(data, fname):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
    and visualize each (height, width) slice in a grid of size approx. sqrt(n) by sqrt(n)."""
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
# normalize data for display
data = (data - data.min()) / (data.max() - data.min())
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = (
((0, n**2 - data.shape[0]), (0, 1),
(0, 1)) # add some space between filters
+ ((0, 0), ) *
(data.ndim - 3)) # don't pad the last dimension (if there is one)
data = np.pad(
data, padding, mode='constant',
constant_values=1) # pad with ones (white)
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(
range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.axis('off')
    plt.imshow(data, cmap='gray')
    plt.savefig(fname)
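
# Illustrative usage sketch (not part of the original pipeline): vis_square
# expects an array shaped (n, height, width) or (n, height, width, 3); a quick
# smoke test could look like the following, where 'filters_demo.png' is a
# hypothetical output name.
#
#     demo = np.random.rand(16, 8, 8)        # 16 single-channel 8x8 maps
#     vis_square(demo, 'filters_demo.png')   # writes a 4x4 grid image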
class ImageClassifier():
def __init__(self,
train_conf,
resize_dim,
crop_dim,
model_dir=None,
use_gpu=True,
mean_file=None,
oversample=False,
is_color=True):
self.train_conf = train_conf
self.model_dir = model_dir
if model_dir is None:
self.model_dir = os.path.dirname(train_conf)
self.resize_dim = resize_dim
self.crop_dims = [crop_dim, crop_dim]
self.oversample = oversample
self.is_color = is_color
self.transformer = image_util.ImageTransformer(is_color=is_color)
self.transformer.set_transpose((2, 0, 1))
self.transformer.set_channel_swap((2, 1, 0))
self.mean_file = mean_file
if self.mean_file is not None:
mean = np.load(self.mean_file)['mean']
mean = mean.reshape(3, self.crop_dims[0], self.crop_dims[1])
self.transformer.set_mean(mean) # mean pixel
        else:
            # If no mean file is given, fall back to per-channel mean values;
            # these three values were computed on ImageNet.
            self.transformer.set_mean(np.array([103.939, 116.779, 123.68]))
conf_args = "use_gpu=%d,is_predict=1" % (int(use_gpu))
conf = parse_config(train_conf, conf_args)
swig_paddle.initPaddle("--use_gpu=%d" % (int(use_gpu)))
self.network = swig_paddle.GradientMachine.createFromConfigProto(
conf.model_config)
assert isinstance(self.network, swig_paddle.GradientMachine)
self.network.loadParameters(self.model_dir)
dim = 3 * self.crop_dims[0] * self.crop_dims[1]
slots = [dense_vector(dim)]
self.converter = DataProviderConverter(slots)
def get_data(self, img_path):
"""
1. load image from img_path.
2. resize or oversampling.
3. transformer data: transpose, channel swap, sub mean.
return K x H x W ndarray.
img_path: image path.
"""
image = image_util.load_image(img_path, self.is_color)
        # An alternative way to extract oversampled features is to crop and
        # average over a large feature map computed from a larger input image,
        # which reduces the computation.
if self.oversample:
image = image_util.resize_image(image, self.resize_dim)
image = np.array(image)
input = np.zeros(
(1, image.shape[0], image.shape[1], 3), dtype=np.float32)
input[0] = image.astype(np.float32)
input = image_util.oversample(input, self.crop_dims)
else:
image = image.resize(self.crop_dims, Image.ANTIALIAS)
input = np.zeros(
(1, self.crop_dims[0], self.crop_dims[1], 3), dtype=np.float32)
input[0] = np.array(image).astype(np.float32)
data_in = []
for img in input:
img = self.transformer.transformer(img).flatten()
data_in.append([img.tolist()])
return data_in
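
    # Illustrative note (an assumption about image_util.oversample, which is
    # not defined in this commit): with oversample=True, get_data typically
    # returns several crops per image (corners and center, each mirrored) in
    # the Caffe style, while oversample=False yields a single crop. Every
    # entry is one flattened 3 * crop_dim * crop_dim vector wrapped in a list,
    # matching the dense_vector slot declared in __init__, and forward() below
    # averages predictions over these crops. A hypothetical check:
    #
    #     data = classifier.get_data('image/dog.png')   # 'classifier' is hypothetical
    #     assert len(data[0][0]) == 3 * 32 * 32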
    def forward(self, input_data):
        # NOTE: this method is shadowed by the forward(data, output_layer)
        # defined immediately below and is never the one invoked by main().
        in_arg = self.converter(input_data)
        return self.network.forwardTest(in_arg)
def forward(self, data, output_layer):
input = self.converter(data)
self.network.forwardTest(input)
output = self.network.getLayerOutputs(output_layer)
res = {}
if isinstance(output_layer, basestring):
output_layer = [output_layer]
for name in output_layer:
            # For oversampling, average the predictions across crops.
            # Otherwise output[name] has shape (1, class_number), so taking
            # the mean is still valid.
res[name] = output[name].mean(0)
return res
def option_parser():
usage = "%prog -c config -i data_list -w model_dir [options]"
parser = OptionParser(usage="usage: %s" % usage)
parser.add_option(
"--job",
action="store",
dest="job_type",
choices=[
'predict',
'extract',
],
default='predict',
help="The job type. \
predict: predicting,\
extract: extract features")
parser.add_option(
"--conf",
action="store",
dest="train_conf",
default='models/vgg.py',
help="network config")
parser.add_option(
"--data",
action="store",
dest="data_file",
default='image/dog.png',
help="image list")
parser.add_option(
"--model",
action="store",
dest="model_path",
default=None,
help="model path")
parser.add_option(
"-c", dest="cpu_gpu", action="store_false", help="Use cpu mode.")
parser.add_option(
"-g",
dest="cpu_gpu",
default=True,
action="store_true",
help="Use gpu mode.")
parser.add_option(
"--mean",
action="store",
dest="mean",
default='data/mean.meta',
help="The mean file.")
parser.add_option(
"--multi_crop",
action="store_true",
dest="multi_crop",
default=False,
help="Wether to use multiple crops on image.")
return parser.parse_args()
def main():
options, args = option_parser()
mean = 'data/mean.meta' if not options.mean else options.mean
conf = 'models/vgg.py' if not options.train_conf else options.train_conf
obj = ImageClassifier(
conf,
32,
32,
options.model_path,
use_gpu=options.cpu_gpu,
mean_file=mean,
oversample=options.multi_crop)
image_path = options.data_file
if options.job_type == 'predict':
output_layer = '__fc_layer_2__'
data = obj.get_data(image_path)
prob = obj.forward(data, output_layer)
lab = np.argsort(-prob[output_layer])
logging.info("Label of %s is: %d", image_path, lab[0])
elif options.job_type == "extract":
output_layer = '__conv_0__'
data = obj.get_data(options.data_file)
features = obj.forward(data, output_layer)
dshape = (64, 32, 32)
fea = features[output_layer].reshape(dshape)
vis_square(fea, 'fea_conv0.png')
if __name__ == '__main__':
main()
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import cPickle
DATA = "cifar-10-batches-py"
CHANNEL = 3
HEIGHT = 32
WIDTH = 32
def create_mean(dataset):
if not os.path.isfile("mean.meta"):
mean = np.zeros(CHANNEL * HEIGHT * WIDTH)
num = 0
for f in dataset:
batch = np.load(f)
mean += batch['data'].sum(0)
num += len(batch['data'])
mean /= num
print mean.size
data = {"mean": mean, "size": mean.size}
cPickle.dump(
data, open("mean.meta", 'w'), protocol=cPickle.HIGHEST_PROTOCOL)
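
# Illustrative round-trip check (hypothetical snippet): the classifier and the
# data provider in this commit read this file back with
# np.load('mean.meta')['mean'], relying on the Python 2-era NumPy behaviour of
# falling back to pickle for non-.npy files, so after create_mean() has run:
#
#     meta = np.load("mean.meta")
#     assert meta['mean'].shape == (CHANNEL * HEIGHT * WIDTH, )   # (3072,)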
def create_data():
train_set = [DATA + "/data_batch_%d" % (i + 1) for i in xrange(0, 5)]
test_set = [DATA + "/test_batch"]
# create mean values
create_mean(train_set)
# create dataset lists
if not os.path.isfile("train.txt"):
train = ["data/" + i for i in train_set]
open("train.txt", "w").write("\n".join(train))
open("train.list", "w").write("\n".join(["data/train.txt"]))
if not os.path.isfile("text.txt"):
test = ["data/" + i for i in test_set]
open("test.txt", "w").write("\n".join(test))
open("test.list", "w").write("\n".join(["data/test.txt"]))
if __name__ == '__main__':
create_data()
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
tar zxf cifar-10-python.tar.gz
rm cifar-10-python.tar.gz
python cifar10.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cPickle
from paddle.trainer.PyDataProvider2 import *
def initializer(settings, mean_path, is_train, **kwargs):
settings.is_train = is_train
settings.input_size = 3 * 32 * 32
settings.mean = np.load(mean_path)['mean']
settings.input_types = {
'image': dense_vector(settings.input_size),
'label': integer_value(10)
}
@provider(init_hook=initializer, pool_size=50000)
def process(settings, file_list):
with open(file_list, 'r') as fdata:
for fname in fdata:
fo = open(fname.strip(), 'rb')
batch = cPickle.load(fo)
fo.close()
images = batch['data']
labels = batch['labels']
            for im, lab in zip(images, labels):
                if settings.is_train and np.random.randint(2):
                    # Training-time augmentation: with probability 0.5, flip
                    # the image horizontally by reshaping to (channel, height,
                    # width) and reversing the width axis.
                    im = im.reshape(3, 32, 32)
                    im = im[:, :, ::-1]
                    im = im.flatten()
                im = im - settings.mean
                yield {'image': im.astype('float32'), 'label': int(lab)}
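
# Data-layout sketch (illustrative): each CIFAR-10 python batch stores
# batch['data'] as a (10000, 3072) uint8 array whose rows are channel-major
# (1024 red values, then green, then blue, each a row-major 32x32 plane).
# That is why the augmentation above reshapes to (3, 32, 32) before reversing
# the width axis. A hypothetical standalone check:
#
#     batch = cPickle.load(open('cifar-10-batches-py/data_batch_1', 'rb'))
#     im = batch['data'][0].reshape(3, 32, 32)
#     assert im[:, :, ::-1].shape == (3, 32, 32)   # horizontal mirror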
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
python classify.py --job=extract --model=output/pass-00299 --data=image/dog.png # add -c to run in CPU mode
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
is_predict = get_config_arg("is_predict", bool, False)
if not is_predict:
args = {'meta': 'data/mean.meta'}
define_py_data_sources2(
train_list='data/train.list',
test_list='data/test.list',
module='dataprovider',
obj='process',
args={'mean_path': 'data/mean.meta'})
settings(
batch_size=128,
learning_rate=0.1 / 128.0,
learning_rate_decay_a=0.1,
learning_rate_decay_b=50000 * 140,
learning_rate_schedule='discexp',
learning_method=MomentumOptimizer(0.9),
regularization=L2Regularization(0.0002 * 128))
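
# Worked example (illustrative; assumes the 'discexp' schedule decays the base
# rate by learning_rate_decay_a once every learning_rate_decay_b samples, i.e.
# lr = learning_rate * decay_a ** floor(samples_seen / decay_b)):
#
#     learning_rate         = 0.1 / 128  ~= 7.8e-4
#     learning_rate_decay_b = 50000 * 140 samples = 140 epochs of CIFAR-10
#
# so under that assumption the rate is multiplied by 0.1 roughly every 140
# epochs during the 300-pass run configured in train.sh.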
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
active_type=ReluActivation(),
ch_in=None):
tmp = img_conv_layer(
input=input,
filter_size=filter_size,
num_channels=ch_in,
num_filters=ch_out,
stride=stride,
padding=padding,
act=LinearActivation(),
bias_attr=False)
return batch_norm_layer(input=tmp, act=active_type)
def shortcut(ipt, n_in, n_out, stride):
if n_in != n_out:
print("n_in != n_out")
return conv_bn_layer(ipt, n_out, 1, stride, 0, LinearActivation())
else:
return ipt
def basicblock(ipt, ch_out, stride):
ch_in = ipt.num_filters
tmp = conv_bn_layer(ipt, ch_out, 3, stride, 1)
tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, LinearActivation())
short = shortcut(ipt, ch_in, ch_out, stride)
return addto_layer(input=[tmp, short], act=ReluActivation())
def bottleneck(ipt, ch_out, stride):
    ch_in = ipt.num_filters
tmp = conv_bn_layer(ipt, ch_out, 1, stride, 0)
tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1)
tmp = conv_bn_layer(tmp, ch_out * 4, 1, 1, 0, LinearActivation())
short = shortcut(ipt, ch_in, ch_out * 4, stride)
return addto_layer(input=[tmp, short], act=ReluActivation())
def layer_warp(block_func, ipt, features, count, stride):
tmp = block_func(ipt, features, stride)
for i in range(1, count):
tmp = block_func(tmp, features, 1)
return tmp
def resnet_imagenet(ipt, depth=50):
cfg = {
        18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}
stages, block_func = cfg[depth]
tmp = conv_bn_layer(
ipt, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3)
tmp = img_pool_layer(input=tmp, pool_size=3, stride=2)
tmp = layer_warp(block_func, tmp, 64, stages[0], 1)
tmp = layer_warp(block_func, tmp, 128, stages[1], 2)
tmp = layer_warp(block_func, tmp, 256, stages[2], 2)
tmp = layer_warp(block_func, tmp, 512, stages[3], 2)
tmp = img_pool_layer(
input=tmp, pool_size=7, stride=1, pool_type=AvgPooling())
tmp = fc_layer(input=tmp, size=1000, act=SoftmaxActivation())
return tmp
def resnet_cifar10(ipt, depth=32):
    # depth should be one of 20, 32, 44, 56, 110, 1202
    assert (depth - 2) % 6 == 0
    n = (depth - 2) / 6
    nStages = (16, 32, 64)  # widths of the three residual stages (not used below)
conv1 = conv_bn_layer(
ipt, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1)
res1 = layer_warp(basicblock, conv1, 16, n, 1)
res2 = layer_warp(basicblock, res1, 32, n, 2)
res3 = layer_warp(basicblock, res2, 64, n, 2)
pool = img_pool_layer(
input=res3, pool_size=8, stride=1, pool_type=AvgPooling())
return pool
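
# Depth arithmetic (illustrative): the CIFAR-10 ResNet uses depth = 6n + 2, so
# depth=32 gives n = 5 and each of the three stages stacks 5 basicblocks of
# two 3x3 convolutions:
#
#     3 stages * 5 blocks * 2 convs + initial conv + final fc = 32 layers
#
# which matches the assert above (the fc classifier is added below).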
datadim = 3 * 32 * 32
classdim = 10
data = data_layer(name='image', size=datadim)
net = resnet_cifar10(data, depth=32)
out = fc_layer(input=net, size=10, act=SoftmaxActivation())
if not is_predict:
lbl = data_layer(name="label", size=classdim)
outputs(classification_cost(input=out, label=lbl))
else:
outputs(out)
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
is_predict = get_config_arg("is_predict", bool, False)
if not is_predict:
define_py_data_sources2(
train_list='data/train.list',
test_list='data/test.list',
module='dataprovider',
obj='process',
args={'mean_path': 'data/mean.meta'})
settings(
batch_size=128,
learning_rate=0.1 / 128.0,
learning_rate_decay_a=0.1,
learning_rate_decay_b=50000 * 100,
learning_rate_schedule='discexp',
learning_method=MomentumOptimizer(0.9),
regularization=L2Regularization(0.0005 * 128), )
def vgg_bn_drop(input):
def conv_block(ipt, num_filter, groups, dropouts, num_channels=None):
return img_conv_group(
input=ipt,
num_channels=num_channels,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act=ReluActivation(),
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type=MaxPooling())
conv1 = conv_block(input, 64, 2, [0.3, 0], 3)
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = dropout_layer(input=conv5, dropout_rate=0.5)
fc1 = fc_layer(input=drop, size=512, act=LinearActivation())
bn = batch_norm_layer(
input=fc1, act=ReluActivation(), layer_attr=ExtraAttr(drop_rate=0.5))
fc2 = fc_layer(input=bn, size=512, act=LinearActivation())
return fc2
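
# Shape sketch (illustrative): each conv_block ends with 2x2 / stride-2
# pooling, so a 32x32 CIFAR-10 input shrinks as 32 -> 16 -> 8 -> 4 -> 2 -> 1
# across conv1..conv5, leaving conv5 as a 512-channel 1x1 map before the
# dropout and the two 512-wide fully connected layers.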
datadim = 3 * 32 * 32
classdim = 10
data = data_layer(name='image', size=datadim)
net = vgg_bn_drop(data)
out = fc_layer(input=net, size=classdim, act=SoftmaxActivation())
if not is_predict:
lbl = data_layer(name="label", size=classdim)
cost = classification_cost(input=out, label=lbl)
outputs(cost)
else:
outputs(out)
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
python classify.py --job=predict --model=output/pass-00299 --data=image/dog.png # add -c to run in CPU mode
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
#cfg=models/resnet.py
cfg=models/vgg.py
output=output
log=train.log
paddle train \
--config=$cfg \
--use_gpu=true \
--trainer_count=1 \
--log_period=100 \
--num_passes=300 \
--save_dir=$output \
2>&1 | tee $log
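
# Note (illustrative): with --num_passes=300 and --save_dir=output, parameters
# are saved once per pass (e.g. output/pass-00299), which is the path that the
# predict and extract scripts above load via --model.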