Commit 5d7e7bc0 authored by Luo Tao

add test_layer for v2

Parent dabdc690
......@@ -74,9 +74,6 @@ from paddle.trainer_config_helpers.config_parser_utils import \
from paddle.trainer_config_helpers.default_decorators import wrap_name_default
import data_type
import activation
import attr
import pooling
__all__ = [
'parse_network', 'data', 'fc', 'conv_shift', 'img_conv', 'img_pool', 'spp',
......@@ -277,126 +274,3 @@ layer_list = [
]
# Export every wrapped v1 layer at module level: each entry of layer_list is
# (v2_name, v1_function, argument_spec), and __convert_to_v2__ builds the v2
# wrapper that is then published under the v2 name via globals().
for l in layer_list:
globals()[l[0]] = __convert_to_v2__(l[1], l[2])
# Manual smoke test for the v2 layer wrappers (this is the chunk the commit
# REMOVES from layer.py; it is superseded by the unittest file added below).
# It builds one instance of every wrapped layer and prints the parsed network
# config, relying on parse_network raising if a layer is mis-wired.
# NOTE(review): the leading indentation of this block was stripped by the
# diff extraction; in the real file every line below the `if` is indented.
# NOTE(review): many assignments deliberately shadow the module-level layer
# functions they call (e.g. `spp = spp(...)`, `maxout = maxout(...)`), so
# statement order here is load-bearing — each function is only callable
# before its own result rebinds the name.
if __name__ == '__main__':
# Shared data inputs reused by all layer checks below.
pixel = data(name='pixel', type=data_type.dense_vector(128))
label = data(name='label', type=data_type.integer_value(10))
weight = data(name='weight', type=data_type.dense_vector(10))
word = data(name='word', type=data_type.integer_value(12))
score = data(name='score', type=data_type.dense_vector(1))
# Tiny fc -> fc classifier consumed by the cost/sampling checks.
hidden = fc(input=pixel,
size=100,
act=activation.Sigmoid(),
param_attr=attr.Param(name='hidden'))
inference = fc(input=hidden, size=10, act=activation.Softmax())
print parse_network(inference)
# test conv layers
conv1 = conv_shift(a=pixel, b=score)
conv2 = img_conv(
input=pixel,
filter_size=1,
filter_size_y=1,
num_channels=8,
num_filters=16,
act=activation.Linear())
print parse_network(conv1, conv2)
# test image pooling layers
maxpool = img_pool(
input=conv2,
pool_size=2,
num_channels=16,
padding=1,
pool_type=pooling.Max())
spp = spp(input=conv2,
pyramid_height=2,
num_channels=16,
pool_type=pooling.Max())
maxout = maxout(input=conv2, num_channels=16, groups=4)
print parse_network(maxpool, spp, maxout)
# test norm layers
norm1 = img_cmrnorm(input=maxpool, size=5)
norm2 = batch_norm(input=maxpool)
norm3 = sum_to_one_norm(input=maxpool)
print parse_network(norm1, norm2, norm3)
# test recurrent layers
recurrent = recurrent(input=word)
lstm = lstmemory(input=word)
gru = grumemory(input=word)
print parse_network(recurrent, lstm, gru)
# test aggregate layers
pool = pool(
input=pixel,
pooling_type=pooling.Avg(),
agg_level=AggregateLevel.EACH_SEQUENCE)
last_seq = last_seq(input=pixel)
first_seq = first_seq(input=pixel)
concat = concat(input=[last_seq, first_seq])
seq_concat = seq_concat(a=last_seq, b=first_seq)
print parse_network(pool, last_seq, first_seq, concat, seq_concat)
# test reshaping layers
block_expand = block_expand(
input=maxout, num_channels=4, stride_x=1, block_x=1)
expand = expand(
input=last_seq, expand_as=pixel, expand_level=ExpandLevel.FROM_TIMESTEP)
repeat = repeat(input=last_seq, num_repeats=4)
reshape = seq_reshape(input=last_seq, reshape_size=4)
rotate = rotate(input=pixel, height=16, width=49)
print parse_network(block_expand, expand, repeat, reshape, rotate)
# test math layers
addto = addto(input=[last_seq, first_seq])
linear_comb = linear_comb(weights=weight, vectors=hidden, size=10)
interpolation = interpolation(input=[hidden, hidden], weight=score)
# NOTE(review): bilinear is constructed but never passed to parse_network.
bilinear = bilinear_interp(input=conv2, out_size_x=4, out_size_y=4)
power = power(input=conv1, weight=score)
scaling = scaling(input=conv1, weight=score)
slope = slope_intercept(input=conv1)
tensor = tensor(a=last_seq, b=first_seq, size=1000)
cos_sim = cos_sim(a=last_seq, b=first_seq)
trans = trans(input=tensor)
print parse_network(addto, linear_comb, interpolation, bilinear, power,
scaling, slope, tensor, cos_sim, trans)
# test sampling layers
maxid = max_id(input=inference)
sampling_id = sampling_id(input=inference)
print parse_network(maxid, sampling_id)
# test slicing and joining layers
pad = pad(input=maxpool, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])
print parse_network(pad)
# test cost layers
cost1 = classification_cost(input=inference, label=label)
cost2 = classification_cost(input=inference, label=label, weight=weight)
cost3 = cross_entropy_cost(input=inference, label=label)
cost4 = cross_entropy_with_selfnorm_cost(input=inference, label=label)
cost5 = regression_cost(input=inference, label=label)
cost6 = regression_cost(input=inference, label=label, weight=weight)
cost7 = multi_binary_label_cross_entropy_cost(input=inference, label=label)
cost8 = rank_cost(left=score, right=score, label=score)
cost9 = lambda_cost(input=inference, score=score)
cost10 = sum_cost(input=inference)
cost11 = huber_cost(input=score, label=label)
# NOTE(review): cost1/cost2 are built but not parsed here.
print parse_network(cost3, cost4)
print parse_network(cost5, cost6)
print parse_network(cost7, cost8, cost9, cost10, cost11)
crf = crf(input=inference, label=label)
crf_decoding = crf_decoding(input=inference, size=3)
ctc = ctc(input=inference, label=label)
warp_ctc = warp_ctc(input=pixel, label=label)
nce = nce(input=inference, label=label, num_classes=3)
hsigmoid = hsigmoid(input=inference, label=label, num_classes=3)
print parse_network(crf, crf_decoding, ctc, warp_ctc, nce, hsigmoid)
# test check layers
eos = eos(input=maxid, eos_id=5)
print parse_network(eos)
......@@ -19,18 +19,106 @@ import paddle.v2.activation as activation
import paddle.v2.attr as attr
import paddle.v2.data_type as data_type
import paddle.v2.layer as layer
import paddle.v2.pooling as pooling
from paddle.trainer_config_helpers.config_parser_utils import \
parse_network_config as parse_network
# Shared fixtures reused by every TestCase below.
# NOTE(review): the diff showed two `pixel` assignments (stale 784-d line
# followed by the new 128-d line); only the 128-d definition belongs in the
# new file, so the dead first assignment is dropped here.
pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
label = layer.data(name='label', type=data_type.integer_value(10))
weight = layer.data(name='weight', type=data_type.dense_vector(10))
score = layer.data(name='score', type=data_type.dense_vector(1))

# Tiny fc -> fc classifier consumed by the cost/sampling tests.
hidden = layer.fc(input=pixel,
                  size=100,
                  act=activation.Sigmoid(),
                  param_attr=attr.Param(name='hidden'))
inference = layer.fc(input=hidden, size=10, act=activation.Softmax())

# 1x1 convolution shared by the image/reshape/slicing tests.
conv = layer.img_conv(
    input=pixel,
    filter_size=1,
    filter_size_y=1,
    num_channels=8,
    num_filters=16,
    act=activation.Linear())
class ImageLayerTest(unittest.TestCase):
    """Build the image-related v2 layers and parse them into a network
    config; parse_network raising is the failure mode being tested."""

    def test_conv_layer(self):
        # conv_shift pairs the pixel input with the score input.
        shifted = layer.conv_shift(a=pixel, b=score)
        print(layer.parse_network(conv, shifted))

    def test_pooling_layer(self):
        max_pool = layer.img_pool(
            input=conv,
            pool_size=2,
            num_channels=16,
            padding=1,
            pool_type=pooling.Max())
        spp_pool = layer.spp(
            input=conv,
            pyramid_height=2,
            num_channels=16,
            pool_type=pooling.Max())
        maxout_out = layer.maxout(input=conv, num_channels=16, groups=4)
        print(layer.parse_network(max_pool, spp_pool, maxout_out))

    def test_norm_layer(self):
        # Each normalization layer consumes the shared conv output.
        norms = [
            layer.img_cmrnorm(input=conv, size=5),
            layer.batch_norm(input=conv),
            layer.sum_to_one_norm(input=conv),
        ]
        print(layer.parse_network(*norms))
class AggregateLayerTest(unittest.TestCase):
    """Check the sequence-aggregation layers (pool/first/last/concat)."""

    def test_aggregate_layer(self):
        avg_pool = layer.pool(
            input=pixel,
            pooling_type=pooling.Avg(),
            agg_level=layer.AggregateLevel.EACH_SEQUENCE)
        tail = layer.last_seq(input=pixel)
        head = layer.first_seq(input=pixel)
        joined = layer.concat(input=[tail, head])
        seq_joined = layer.seq_concat(a=tail, b=head)
        print(layer.parse_network(avg_pool, tail, head, joined, seq_joined))
class MathLayerTest(unittest.TestCase):
    """Exercise the element-wise / algebraic v2 layers."""

    def test_math_layer(self):
        summed = layer.addto(input=[pixel, pixel])
        combo = layer.linear_comb(weights=weight, vectors=hidden, size=10)
        interp = layer.interpolation(input=[hidden, hidden], weight=score)
        # Constructed but deliberately not passed to parse_network below,
        # matching the original test's behavior.
        bilinear = layer.bilinear_interp(input=conv, out_size_x=4, out_size_y=4)
        powered = layer.power(input=pixel, weight=score)
        scaled = layer.scaling(input=pixel, weight=score)
        affine = layer.slope_intercept(input=pixel)
        tensor_out = layer.tensor(a=pixel, b=pixel, size=1000)
        similarity = layer.cos_sim(a=pixel, b=pixel)
        transposed = layer.trans(input=tensor_out)
        print(layer.parse_network(summed, combo, interp, powered, scaled,
                                  affine, tensor_out, similarity, transposed))
class ReshapeLayerTest(unittest.TestCase):
    """Check the layers that reshape, expand, or rotate their input."""

    def test_reshape_layer(self):
        expanded_block = layer.block_expand(
            input=conv, num_channels=4, stride_x=1, block_x=1)
        broadcast = layer.expand(
            input=weight,
            expand_as=pixel,
            expand_level=layer.ExpandLevel.FROM_TIMESTEP)
        repeated = layer.repeat(input=pixel, num_repeats=4)
        reshaped = layer.seq_reshape(input=pixel, reshape_size=4)
        rotated = layer.rotate(input=pixel, height=16, width=49)
        print(layer.parse_network(expanded_block, broadcast, repeated,
                                  reshaped, rotated))
class RecurrentLayerTest(unittest.TestCase):
    """Check the plain recurrent, LSTM, and GRU memory layers."""

    def test_recurrent_layer(self):
        seq_input = layer.data(name='word', type=data_type.integer_value(12))
        # Build the three recurrent variants in the same order as before.
        outputs = [
            builder(input=seq_input)
            for builder in (layer.recurrent, layer.lstmemory, layer.grumemory)
        ]
        print(layer.parse_network(*outputs))
class CostLayerTest(unittest.TestCase):
......@@ -51,12 +139,32 @@ class CostLayerTest(unittest.TestCase):
cost10 = layer.sum_cost(input=inference)
cost11 = layer.huber_cost(input=score, label=label)
print dir(layer)
layer.parse_network(cost1, cost2)
print dir(layer)
#print layer.parse_network(cost3, cost4)
#print layer.parse_network(cost5, cost6)
#print layer.parse_network(cost7, cost8, cost9, cost10, cost11)
print layer.parse_network(cost1, cost2)
print layer.parse_network(cost3, cost4)
print layer.parse_network(cost5, cost6)
print layer.parse_network(cost7, cost8, cost9, cost10, cost11)
crf = layer.crf(input=inference, label=label)
crf_decoding = layer.crf_decoding(input=inference, size=3)
ctc = layer.ctc(input=inference, label=label)
warp_ctc = layer.warp_ctc(input=pixel, label=label)
nce = layer.nce(input=inference, label=label, num_classes=3)
hsigmoid = layer.hsigmoid(input=inference, label=label, num_classes=3)
print layer.parse_network(crf, crf_decoding, ctc, warp_ctc, nce,
hsigmoid)
class OtherLayerTest(unittest.TestCase):
    """Remaining layers: sampling/check layers and padding."""

    def test_sampling_layer(self):
        argmax = layer.max_id(input=inference)
        sampled = layer.sampling_id(input=inference)
        end_marker = layer.eos(input=argmax, eos_id=5)
        print(layer.parse_network(argmax, sampled, end_marker))

    def test_slicing_joining_layer(self):
        padded = layer.pad(input=conv, pad_c=[2, 3], pad_h=[1, 2],
                           pad_w=[3, 1])
        print(layer.parse_network(padded))
if __name__ == '__main__':
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment