Commit 5b47f3c2 authored by dayhaha

follow comment

Parent 202184b0
@@ -2,3 +2,5 @@ data/raw_data
data/train.list
data/test.list
*.log
+*.pyc
+plot.png
This diff is collapsed.
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
is_predict = get_config_arg("is_predict", bool, False)
####################Data Configuration ##################
if not is_predict:
data_dir = './data/'
define_py_data_sources2(
train_list=data_dir + 'train.list',
test_list=data_dir + 'test.list',
module='mnist_provider',
obj='process')
######################Algorithm Configuration #############
settings(
batch_size=128,
learning_rate=0.1 / 128.0,
learning_method=MomentumOptimizer(0.9),
regularization=L2Regularization(0.0005 * 128))
#######################Network Configuration #############
data_size = 1 * 28 * 28
label_size = 10
img = data_layer(name='pixel', size=data_size)
# first conv layer
conv_pool_1 = simple_img_conv_pool(
input=img,
filter_size=5,
num_filters=20,
num_channel=1,
pool_size=2,
pool_stride=2,
act=TanhActivation())
# second conv layer
conv_pool_2 = simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
num_channel=20,
pool_size=2,
pool_stride=2,
act=TanhActivation())
# The first fully-connected layer
fc1 = fc_layer(input=conv_pool_2, size=128, act=TanhActivation())
# The softmax layer, note that the hidden size should be 10,
# which is the number of unique digits
predict = fc_layer(input=fc1, size=10, act=SoftmaxActivation())
if not is_predict:
lbl = data_layer(name="label", size=label_size)
inputs(img, lbl)
outputs(classification_cost(input=predict, label=lbl))
else:
outputs(predict)
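For reference, the feature-map sizes produced by the two conv/pool stages above can be checked with a little arithmetic. The sketch below is illustrative only and not part of the config; it assumes simple_img_conv_pool uses stride-1 convolution with no padding and non-overlapping 2x2 pooling, which is consistent with the sizes used in this demo.

# Standalone sketch: trace the spatial sizes through the two conv/pool blocks.
def conv_out(size, filter_size):
    # valid convolution, stride 1, no padding (assumed)
    return size - filter_size + 1

def pool_out(size, pool_size, pool_stride):
    # non-overlapping pooling
    return (size - pool_size) / pool_stride + 1

size = 28                                  # MNIST images are 28 x 28
size = pool_out(conv_out(size, 5), 2, 2)   # conv_pool_1: 28 -> 24 -> 12
size = pool_out(conv_out(size, 5), 2, 2)   # conv_pool_2: 12 -> 8 -> 4
print size, 50 * size * size               # 4 800  (50 channels * 4 * 4 = 800 features into fc1)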
@@ -31,5 +31,5 @@ done
cd $DIR
rm -f *.list
-echo "./data/raw_data/train\n" > "$DIR/train.list"
-echo "./data/raw_data/t10k\n" > "$DIR/test.list"
+echo "./data/raw_data/train" > "$DIR/train.list"
+echo "./data/raw_data/t10k" > "$DIR/test.list"
@@ -30,8 +30,6 @@ def get_best_pass(filename):
filename = sys.argv[1]
log = get_best_pass(filename)
-predict_error = math.sqrt(float(log[0])) / 2
classification_accuracy = (1 - float(log[1])) * 100
-print 'Best pass is %s, error is %s, which means predict get error as %f' % (
-    log[2], log[0], predict_error)
+print 'Best pass is %s, testing Avgcost is %s' % (log[2], log[0])
print 'The classification accuracy is %.2f%%' % classification_accuracy
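A minimal sketch of the two formulas printed above, with made-up values standing in for the fields parsed out of the training log (log[0] is the test AvgCost, log[1] the classification error rate, log[2] the pass id):

# Hypothetical parsed values, for illustration only.
avg_cost, error_rate, best_pass = '0.0284', '0.0089', 'pass-00077'
classification_accuracy = (1 - float(error_rate)) * 100
print 'Best pass is %s, testing Avgcost is %s' % (best_pass, avg_cost)
print 'The classification accuracy is %.2f%%' % classification_accuracy  # 99.11%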
@@ -14,34 +14,28 @@
import numpy as np
import matplotlib.pyplot as plt
import random
+import struct


def read_data(path, filename):
-    imgf = path + filename + "-images-idx3-ubyte"
-    labelf = path + filename + "-labels-idx1-ubyte"
-    f = open(imgf, "rb")
-    l = open(labelf, "rb")
-    f.read(16)
-    l.read(8)
-    # Define number of samples for train/test
-    n = 60000 if "train" in filename else 10000
-    rows = 28
-    cols = 28
+    with open(path + filename + "-images-idx3-ubyte",
+              "rb") as f:  # open picture file
+        magic, n, rows, cols = struct.unpack(">IIII", f.read(16))
        images = np.fromfile(
            f, 'ubyte',
            count=n * rows * cols).reshape(n, rows, cols).astype('float32')
+    with open(path + filename + "-labels-idx1-ubyte",
+              "rb") as l:  # open label file
+        magic, n = struct.unpack(">II", l.read(8))
        labels = np.fromfile(l, 'ubyte', count=n).astype("int")
    return images, labels


if __name__ == "__main__":
-    train_images, train_labels = read_data("./raw_data/", "train")
-    test_images, test_labels = read_data("./raw_data/", "t10k")
+    train_images, train_labels = read_data("./data/raw_data/", "train")
+    test_images, test_labels = read_data("./data/raw_data/", "t10k")
    label_list = []
    for i in range(10):
        index = random.randint(0, train_images.shape[0] - 1)
...
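The struct.unpack calls above follow the MNIST IDX layout: a big-endian header (magic number, item count, and, for image files, the row and column counts) followed by raw unsigned bytes. A self-contained sketch of reading one image file and sanity-checking its header; the path is only an example, matching the lists written by get_mnist_data.sh above.

import struct
import numpy as np

def read_idx_images(path):
    with open(path, "rb") as f:
        # 16-byte header: magic (2051 for image files), count, rows, cols.
        magic, n, rows, cols = struct.unpack(">IIII", f.read(16))
        assert magic == 2051, "not an IDX image file"
        return np.fromfile(f, dtype=np.uint8).reshape(n, rows, cols)

# images = read_idx_images("./data/raw_data/train-images-idx3-ubyte")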
@@ -39,13 +39,53 @@ data_size = 1 * 28 * 28
label_size = 10
img = data_layer(name='pixel', size=data_size)

-# The first fully-connected layer
-hidden1 = fc_layer(input=img, size=128, act=ReluActivation())
-# The second fully-connected layer and the corresponding activation function
-hidden2 = fc_layer(input=hidden1, size=64, act=ReluActivation())
-# The third fully-connected layer, note that the hidden size should be 10,
-# which is the number of unique digits
-predict = fc_layer(input=hidden2, size=10, act=SoftmaxActivation())
+def softmax_regression(img):
+    predict = fc_layer(input=img, size=10, act=SoftmaxActivation())
+    return predict
+
+
+def multilayer_perceptron(img):
+    # The first fully-connected layer
+    hidden1 = fc_layer(input=img, size=128, act=ReluActivation())
+    # The second fully-connected layer and the corresponding activation function
+    hidden2 = fc_layer(input=hidden1, size=64, act=ReluActivation())
+    # The third fully-connected layer, note that the hidden size should be 10,
+    # which is the number of unique digits
+    predict = fc_layer(input=hidden2, size=10, act=SoftmaxActivation())
+    return predict
+
+
+def convolutional_neural_network(img):
+    # first conv layer
+    conv_pool_1 = simple_img_conv_pool(
+        input=img,
+        filter_size=5,
+        num_filters=20,
+        num_channel=1,
+        pool_size=2,
+        pool_stride=2,
+        act=TanhActivation())
+    # second conv layer
+    conv_pool_2 = simple_img_conv_pool(
+        input=conv_pool_1,
+        filter_size=5,
+        num_filters=50,
+        num_channel=20,
+        pool_size=2,
+        pool_stride=2,
+        act=TanhActivation())
+    # The first fully-connected layer
+    fc1 = fc_layer(input=conv_pool_2, size=128, act=TanhActivation())
+    # The softmax layer, note that the hidden size should be 10,
+    # which is the number of unique digits
+    predict = fc_layer(input=fc1, size=10, act=SoftmaxActivation())
+    return predict
+
+
+predict = softmax_regression(img)
+#predict = multilayer_perceptron(img)
+#predict = convolutional_neural_network(img)

if not is_predict:
    lbl = data_layer(name="label", size=label_size)
...
@@ -13,36 +13,25 @@
# limitations under the License.
from paddle.trainer.PyDataProvider2 import *
-import numpy
+import numpy as np
+import struct


# Define a py data provider
@provider(
    input_types={'pixel': dense_vector(28 * 28),
-                 'label': integer_value(10)},
-    cache=CacheType.CACHE_PASS_IN_MEM)
+                 'label': integer_value(10)})
def process(settings, filename):  # settings is not used currently.
-    imgf = filename + "-images-idx3-ubyte"
-    labelf = filename + "-labels-idx1-ubyte"
-    f = open(imgf, "rb")
-    l = open(labelf, "rb")
-    f.read(16)
-    l.read(8)
-    # Define number of samples for train/test
-    if "train" in filename:
-        n = 60000
-    else:
-        n = 10000
-    images = numpy.fromfile(
-        f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32')
-    images = images / 255.0 * 2.0 - 1.0
-    labels = numpy.fromfile(l, 'ubyte', count=n).astype("int")
+    with open(filename + "-images-idx3-ubyte", "rb") as f:  # open picture file
+        magic, n, rows, cols = struct.unpack(">IIII", f.read(16))
+        images = np.fromfile(
+            f, 'ubyte',
+            count=n * rows * cols).reshape(n, rows, cols).astype('float32')
+        images = images / 255.0 * 2.0 - 1.0  # normalized to [-1,1]
+    with open(filename + "-labels-idx1-ubyte", "rb") as l:  # open label file
+        magic, n = struct.unpack(">II", l.read(8))
+        labels = np.fromfile(l, 'ubyte', count=n).astype("int")
    for i in xrange(n):
        yield {"pixel": images[i, :], 'label': labels[i]}
-    f.close()
-    l.close()
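The normalization line in the provider maps raw byte intensities in [0, 255] onto [-1, 1]. A quick standalone check of the formula:

import numpy as np

pixels = np.array([0., 127.5, 255.])      # raw ubyte intensities
normalized = pixels / 255.0 * 2.0 - 1.0   # same formula as in mnist_provider.py
print normalized                          # [-1.  0.  1.]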
@@ -20,15 +20,33 @@ import sys
def plot_log(filename):
    with open(filename, 'r') as f:
        text = f.read()
-        pattern = re.compile('Test.*? cost=([0-9]+\.[0-9]+).*?pass-([0-9]+)',
-                             re.S)
+        pattern = re.compile(
+            'AvgCost=([0-9]+\.[0-9]+).*?Test.*? cost=([0-9]+\.[0-9]+).*?pass-([0-9]+)',
+            re.S)
        results = re.findall(pattern, text)
-        cost, pass_ = zip(*results)
-        cost_float = map(float, cost)
+        train_cost, test_cost, pass_ = zip(*results)
+        train_cost_float = map(float, train_cost)
+        test_cost_float = map(float, test_cost)
        pass_int = map(int, pass_)
-        plt.plot(pass_int, cost_float, 'bo', pass_, cost_float, 'k')
+        plt.plot(pass_int, train_cost_float, 'red', label='Train')
+        plt.plot(pass_int, test_cost_float, 'g--', label='Test')
        plt.ylabel('AvgCost')
-        plt.xlabel('epoch')
+        plt.xlabel('Epoch')
+        # Now add the legend with some customizations.
+        legend = plt.legend(loc='upper right', shadow=False)
+        # The frame is matplotlib.patches.Rectangle instance surrounding the legend.
+        frame = legend.get_frame()
+        frame.set_facecolor('0.90')
+        # Set the fontsize
+        for label in legend.get_texts():
+            label.set_fontsize('large')
+        for label in legend.get_lines():
+            label.set_linewidth(1.5)  # the legend line width
        plt.show()
...
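The new pattern captures the training AvgCost, the test cost and the pass id together. A small sketch running it over a synthetic log fragment (the surrounding wording of the log lines is invented for illustration; only the three captured fields matter):

import re

text = ("Batch=1000 AvgCost=0.3412 ...\n"
        "Test samples=10000 cost=0.2876 Eval: pass-00003\n")
pattern = re.compile(
    'AvgCost=([0-9]+\.[0-9]+).*?Test.*? cost=([0-9]+\.[0-9]+).*?pass-([0-9]+)',
    re.S)
print re.findall(pattern, text)   # [('0.3412', '0.2876', '00003')]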
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Usage: predict.py -c CONF -d DATA -m MODEL
+"""Usage: predict.py -c CONF -d ./data/raw_data/ -m MODEL

Arguments:
@@ -57,8 +57,10 @@ class Prediction():
        output = self.network.forwardTest(input)
        prob = output[0]["value"]
        predict = np.argsort(-prob)
+        print "Predicted probability of each digit:"
        print prob
-        print predict[0][0], self.labels[index]
+        print "Predict Number: %d" % predict[0][0]
+        print "Actual Number: %d" % self.labels[index]


def main():
...
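np.argsort(-prob) sorts the class indices by descending probability, so predict[0][0] is the most likely digit. A tiny illustration with a made-up probability row:

import numpy as np

prob = np.array([[0.01, 0.02, 0.05, 0.02, 0.01, 0.03, 0.01, 0.80, 0.03, 0.02]])
predict = np.argsort(-prob)                  # indices ranked by probability
print "Predicted probability of each digit:"
print prob
print "Predict Number: %d" % predict[0][0]   # 7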
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
is_predict = get_config_arg("is_predict", bool, False)
####################Data Configuration ##################
if not is_predict:
data_dir = './data/'
define_py_data_sources2(
train_list=data_dir + 'train.list',
test_list=data_dir + 'test.list',
module='mnist_provider',
obj='process')
######################Algorithm Configuration #############
settings(
batch_size=128,
learning_rate=0.1 / 128.0,
learning_method=MomentumOptimizer(0.9),
regularization=L2Regularization(0.0005 * 128))
#######################Network Configuration #############
data_size = 1 * 28 * 28
label_size = 10
img = data_layer(name='pixel', size=data_size)
# softmax regression is used
predict = fc_layer(input=img, size=10, act=SoftmaxActivation())
if not is_predict:
lbl = data_layer(name="label", size=label_size)
inputs(img, lbl)
outputs(classification_cost(input=predict, label=lbl))
else:
outputs(predict)
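For reference, this configuration is plain softmax regression: a single fully connected layer from the 784 input pixels to the 10 digit classes. Its parameter count is easy to check by hand (a standalone sketch, not part of the config):

data_size = 1 * 28 * 28   # 784 input pixels
label_size = 10           # 10 digit classes
num_parameters = data_size * label_size + label_size   # weights + biases
print num_parameters      # 7850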
@@ -14,9 +14,9 @@
# limitations under the License.
set -e

-config=cnn_mnist.py
-output=./cnn_mnist_model
-log=cnn_train.log
+config=mnist_model.py
+output=./softmax_mnist_model
+log=softmax_train.log
...