Unverified commit 97a4a77a, authored by Jason, committed by GitHub

Merge pull request #199 from SunAhong1993/develop

fix the normalize
@@ -246,9 +246,14 @@ class CaffeDecoder(object):
         if layer.type == 'PReLU':
             c_o, c_i, h, w = map(int, [1] + \
                 list(dims) + [1]* (3 - len(dims)))
+        elif layer.type == 'Normalize':
+            data = np.asarray(list(blob.data), dtype=np.float32)
+            transformed.append(data)
+            continue
         else:
             c_o, c_i, h, w = map(int, [1] * (4 - len(dims)) \
                 + list(dims))
     else:
         c_o = blob.num
         c_i = blob.channels
...
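
For a Caffe Normalize layer the learned blob is a 1-D scale vector (one value per channel, or a single value when the scale is channel-shared), so forcing it through the usual (c_o, c_i, h, w) reshape would distort it; the added branch keeps it as a flat float32 array. Below is a minimal sketch of that branch in isolation, using a hypothetical MockBlob in place of the real caffe BlobProto:

import numpy as np

class MockBlob:
    """Hypothetical stand-in for a caffe BlobProto: .data is a flat float sequence."""
    def __init__(self, values):
        self.data = values

# e.g. an SSD-style Normalize over a 512-channel feature map, scales initialised to 20
blob = MockBlob([20.0] * 512)

# The added branch: keep the 1-D per-channel scales as-is instead of reshaping to 4-D.
data = np.asarray(list(blob.data), dtype=np.float32)
assert data.shape == (512,)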
@@ -12,16 +12,14 @@ def normalize_layer(inputs,
                     input_shape=None,
                     name=None):
     assert across_spatial == False, "Only support across_spatial == False for Normalize"
     input = inputs[0]
     l2_norm = fluid.layers.l2_normalize(input, axis=1, name=name + '_l2')
     scale_param = fluid.layers.create_parameter(
-        shape=[1]
-        if channel_shared else [input_shape[0][0], 1, 1, input_shape[0][1]],
+        shape=[1] if channel_shared else [input_shape[0][1]],
         dtype=input.dtype,
         attr=name + '_scale')
     scale_param = fluid.layers.reshape(x=scale_param, \
-        shape=[1] if channel_shared else [input_shape[0][0], 1, 1, input_shape[0][1]])
+        shape=[1] if channel_shared else [input_shape[0][1]])
     out = fluid.layers.elementwise_mul(x=l2_norm,
                                        y=scale_param,
                                        axis=-1 if channel_shared else 1)
...
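
Putting the hunk together: the converted op L2-normalizes across the channel axis of the NCHW input and multiplies by a per-channel scale parameter (a single scalar when channel_shared), broadcast with axis=1. The old shape, [input_shape[0][0], 1, 1, input_shape[0][1]], baked the batch dimension into a learnable weight and left the channels on the last axis, which does not line up with that axis=1 broadcast. The sketch below is only a reconstruction of the function after this change under the old fluid API; the parts not visible in the hunk (the across_spatial and channel_shared parameters, their defaults, and the final return) are assumed:

import paddle.fluid as fluid

def normalize_layer(inputs,
                    across_spatial=None,   # assumed parameter, referenced by the assert
                    channel_shared=None,   # assumed parameter, referenced by the shapes
                    input_shape=None,
                    name=None):
    assert across_spatial == False, "Only support across_spatial == False for Normalize"
    input = inputs[0]
    # L2-normalize each spatial position across the channel axis (NCHW -> axis=1).
    l2_norm = fluid.layers.l2_normalize(input, axis=1, name=name + '_l2')
    # One learnable scale per channel, matching the 1-D blob the decoder now
    # passes through; a single scalar when the scale is shared across channels.
    scale_param = fluid.layers.create_parameter(
        shape=[1] if channel_shared else [input_shape[0][1]],
        dtype=input.dtype,
        attr=name + '_scale')
    scale_param = fluid.layers.reshape(
        x=scale_param,
        shape=[1] if channel_shared else [input_shape[0][1]])
    # axis=1 broadcasts the [C] scale over the N, H, W dimensions of l2_norm.
    out = fluid.layers.elementwise_mul(x=l2_norm,
                                       y=scale_param,
                                       axis=-1 if channel_shared else 1)
    return out  # assumed: the collapsed tail of the function returns the result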