Commit d6bda8d7 authored by 别团等shy哥发育

Reproduction of the ResNet50V2 model

Parent a7ac7320

import tensorflow as tf
import tensorflow.keras.layers as layers
from tensorflow.keras.models import Model
from plot_model import plot_model


def block2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None):
    """A pre-activation residual block (ResNet v2).

    Arguments:
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        conv_shortcut: default False, use convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.

    Returns:
        Output tensor for the residual block.
    """
    # Pre-activation: BN + ReLU come before the convolutions.
    preact = layers.BatchNormalization(name=name + '_preact_bn')(x)
    preact = layers.Activation('relu', name=name + '_preact_relu')(preact)

    if conv_shortcut:
        # Projection shortcut taken from the pre-activated tensor.
        shortcut = layers.Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(preact)
    else:
        # Identity shortcut; downsample with a 1x1 max pool when stride > 1.
        shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x

    # Bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (no activation after the last conv).
    x = layers.Conv2D(filters, 1, strides=1, use_bias=False, name=name + '_1_conv')(preact)
    x = layers.BatchNormalization(name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = layers.Conv2D(filters, kernel_size, strides=stride, use_bias=False,
                      name=name + '_2_conv')(x)
    x = layers.BatchNormalization(name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
    x = layers.Add(name=name + '_out')([shortcut, x])
    return x
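
# Shape sketch for block2 (hypothetical standalone check, not part of the network build):
#   inp = layers.Input(shape=(56, 56, 64))
#   out = block2(inp, 64, conv_shortcut=True, name='demo')
#   Model(inp, out).output_shape  # -> (None, 56, 56, 256): channels expand to 4 * filters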


def stack2(x, filters, blocks, stride1=2, name=None):
    """A stack of residual blocks; in ResNet v2 the downsampling stride is applied in the last block."""
    x = block2(x, filters, conv_shortcut=True, name=name + '_block1')
    for i in range(2, blocks):
        x = block2(x, filters, name=name + '_block' + str(i))
    x = block2(x, filters, stride=stride1, name=name + '_block' + str(blocks))
    return x


def ResNet50V2(include_top=True,              # whether to include the fully connected classifier on top
               preact=True,                   # whether to use pre-activation (ResNet v2)
               use_bias=True,                 # whether the stem convolution uses a bias
               weights='imagenet',            # kept for API parity; pretrained weights are not loaded here
               input_tensor=None,             # optional Keras tensor to use as the image input
               input_shape=None,
               pooling=None,
               classes=1000,                  # number of classes for the classifier
               classifier_activation='softmax'):  # activation of the classification layer
    img_input = layers.Input(shape=input_shape)

    # Stem: 7x7/2 convolution followed by 3x3/2 max pooling.
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)
    if not preact:
        x = layers.BatchNormalization(name='conv1_bn')(x)
        x = layers.Activation('relu', name='conv1_relu')(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)

    # Four stages with [3, 4, 6, 3] residual blocks (the ResNet-50 configuration).
    x = stack2(x, 64, 3, name='conv2')
    x = stack2(x, 128, 4, name='conv3')
    x = stack2(x, 256, 6, name='conv4')
    x = stack2(x, 512, 3, stride1=1, name='conv5')

    if preact:
        x = layers.BatchNormalization(name='post_bn')(x)
        x = layers.Activation('relu', name='post_relu')(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    model = Model(img_input, x)
    return model


if __name__ == '__main__':
    model = ResNet50V2(input_shape=(224, 224, 3))
    model.summary()
    plot_model(model, to_file='../img/resnet50v2.png')
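    # Optional smoke test (a sketch; no pretrained weights are loaded here):
    # push a random batch through the network and check the classifier output shape.
    import numpy as np
    dummy = np.random.random((1, 224, 224, 3)).astype(np.float32)
    print(model.predict(dummy).shape)  # expected: (1, 1000) with the default classes=1000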
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.keras import layers
from plot_model import plot_model


def SKConv(M=2, r=16, L=32, G=32, name='skconv'):
    """Selective Kernel convolution (SKNet).

    M: number of branches, r: reduction ratio of the fuse stage,
    L: minimum width of the fuse stage, G: number of groups.
    """
    def wrapper(inputs):
        # Static spatial/channel sizes are required by the Reshape layers below.
        h, w, filters = inputs.get_shape().as_list()[1:4]
        d = max(filters // r, L)

        x = inputs
        xs = []
        # Split: M branches with increasing receptive fields (dilation rates 1..M).
        for m in range(1, M + 1):
            if G == 1:
                _x = layers.Conv2D(filters, 3, dilation_rate=m, padding='same',
                                   use_bias=False, name=name + '_conv%d' % m)(x)
            else:
                # Emulate a grouped convolution: depthwise conv, then sum within each group.
                c = filters // G
                _x = layers.DepthwiseConv2D(3, dilation_rate=m, depth_multiplier=c, padding='same',
                                            use_bias=False, name=name + '_conv%d' % m)(x)
                _x = layers.Reshape([h, w, G, c, c], name=name + '_conv%d_reshape1' % m)(_x)
                _x = layers.Lambda(lambda x: tf.reduce_sum(x, axis=-1),
                                   name=name + '_conv%d_sum' % m)(_x)
                _x = layers.Reshape([h, w, filters], name=name + '_conv%d_reshape2' % m)(_x)
            _x = layers.BatchNormalization(name=name + '_conv%d_bn' % m)(_x)
            _x = layers.Activation('relu', name=name + '_conv%d_relu' % m)(_x)
            xs.append(_x)

        # Fuse: element-wise sum of the branches, then global average pooling.
        U = layers.Add(name=name + '_add')(xs)
        s = layers.Lambda(lambda x: tf.reduce_mean(x, axis=[1, 2], keepdims=True),
                          name=name + '_gap')(U)
        z = layers.Conv2D(d, 1, name=name + '_fc_z')(s)
        z = layers.BatchNormalization(name=name + '_fc_z_bn')(z)
        z = layers.Activation('relu', name=name + '_fc_z_relu')(z)

        # Select: softmax attention over the M branches, per channel.
        x = layers.Conv2D(filters * M, 1, name=name + '_fc_x')(z)
        x = layers.Reshape([1, 1, filters, M], name=name + '_reshape')(x)
        scale = layers.Softmax(name=name + '_softmax')(x)

        x = layers.Lambda(lambda x: tf.stack(x, axis=-1),
                          name=name + '_stack')(xs)  # b, h, w, c, M
        x = Axpby(name=name + '_axpby')([scale, x])
        return x
    return wrapper


class Axpby(layers.Layer):
    """Weighted fusion of the branch outputs:
        F = a * X + b * Y + ...
    Shape info:
        a: B x 1 x 1 x C
        X: B x H x W x C
        b: B x 1 x 1 x C
        Y: B x H x W x C
        ...
        F: B x H x W x C
    """
    def __init__(self, **kwargs):
        super(Axpby, self).__init__(**kwargs)

    def build(self, input_shape):
        super(Axpby, self).build(input_shape)  # Be sure to call this at the end

    def call(self, inputs):
        """scale: [B, 1, 1, C, M]
        x: [B, H, W, C, M]
        """
        scale, x = inputs
        # Broadcast the attention weights over H and W, then sum over the M branches.
        f = tf.multiply(scale, x, name='product')
        f = tf.reduce_sum(f, axis=-1, name='sum')
        return f

    def compute_output_shape(self, input_shape):
        # input_shape is a list [scale_shape, x_shape]; the output drops the branch axis M.
        return input_shape[1][:4]
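
# Fusion sketch: with M = 2 branches the layer computes
#   F = scale[..., 0] * X_1 + scale[..., 1] * X_2
# where each scale slice has shape [B, 1, 1, C] and broadcasts over H and W,
# so the fused output keeps the feature-map shape [B, H, W, C].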


if __name__ == '__main__':
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model

    inputs = Input([224, 224, 32])
    x = SKConv(3, G=1)(inputs)
    m = Model(inputs, x)
    m.summary()
    plot_model(m, to_file='img/SKNet.png')

    import numpy as np
    X = np.random.random([2, 224, 224, 32]).astype(np.float32)
    y = m.predict(X)
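    # The SK block preserves the feature-map shape, so y.shape should be (2, 224, 224, 32).
    print(y.shape)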