Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
谢有为
Python专题
提交
79ee1180
Python专题
项目概览
谢有为
/
Python专题
与 Fork 源项目一致
Fork自
GitCode官方 / Python专题
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
Python专题
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
79ee1180
编写于
8月 10, 2021
作者:
M
MaoXianxin
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
tensorflow classification
上级
6b9f1d6d
变更
4
隐藏空白更改
内联
并排
Showing 4 changed files with 767 additions and 0 deletions
+767
-0
CV_Classification/custom_augmentation.py
CV_Classification/custom_augmentation.py
+443
-0
CV_Classification/feature_extraction.py
CV_Classification/feature_extraction.py
+151
-0
CV_Classification/finetune.py
CV_Classification/finetune.py
+165
-0
CV_Classification/run.sh
CV_Classification/run.sh
+8
-0
未找到文件。
CV_Classification/custom_augmentation.py
0 → 100644
浏览文件 @
79ee1180
import
numpy
as
np
from
tensorflow.python.eager
import
context
from
tensorflow.python.compat
import
compat
from
tensorflow.python.framework
import
dtypes
from
tensorflow.python.framework
import
ops
from
tensorflow.python.framework
import
tensor_shape
from
tensorflow.python.framework
import
tensor_util
from
tensorflow.python.keras
import
backend
from
tensorflow.python.keras.engine
import
base_preprocessing_layer
from
tensorflow.python.keras.engine.base_preprocessing_layer
import
PreprocessingLayer
from
tensorflow.python.keras.engine.input_spec
import
InputSpec
from
tensorflow.python.keras.utils
import
control_flow_util
from
tensorflow.python.ops
import
array_ops
from
tensorflow.python.ops
import
check_ops
from
tensorflow.python.ops
import
control_flow_ops
from
tensorflow.python.ops
import
gen_image_ops
from
tensorflow.python.ops
import
image_ops
from
tensorflow.python.ops
import
math_ops
from
tensorflow.python.ops
import
stateful_random_ops
from
tensorflow.python.ops
import
stateless_random_ops
from
tensorflow.python.util.tf_export
import
keras_export
from
tensorflow.keras.layers.experimental.preprocessing
import
*
import
tensorflow
as
tf
# Alias TF's resize-method enum locally and map the user-facing string names
# onto it (same table the stock Keras preprocessing layers use).
ResizeMethod = image_ops.ResizeMethod

_RESIZE_METHODS = {
    'bilinear': ResizeMethod.BILINEAR,
    'nearest': ResizeMethod.NEAREST_NEIGHBOR,
    'bicubic': ResizeMethod.BICUBIC,
    'area': ResizeMethod.AREA,
    'lanczos3': ResizeMethod.LANCZOS3,
    'lanczos5': ResizeMethod.LANCZOS5,
    'gaussian': ResizeMethod.GAUSSIAN,
    'mitchellcubic': ResizeMethod.MITCHELLCUBIC
}

# Axis indices of height and width for NHWC (batch, height, width, channels)
# image batches.
H_AXIS = 1
W_AXIS = 2
def check_fill_mode_and_interpolation(fill_mode, interpolation):
    """Validate augmentation options, raising ``NotImplementedError`` if bad.

    Args:
        fill_mode: one of 'reflect', 'wrap', 'constant', 'nearest'.
        interpolation: one of 'nearest', 'bilinear'.

    Raises:
        NotImplementedError: if either argument is outside its supported set.
    """
    supported_fill_modes = ('reflect', 'wrap', 'constant', 'nearest')
    supported_interpolations = ('nearest', 'bilinear')
    if fill_mode not in supported_fill_modes:
        raise NotImplementedError(
            'Unknown `fill_mode` {}. Only `reflect`, `wrap`, '
            '`constant` and `nearest` are supported.'.format(fill_mode))
    if interpolation not in supported_interpolations:
        raise NotImplementedError(
            'Unknown `interpolation` {}. Only `nearest` and '
            '`bilinear` are supported.'.format(interpolation))
def get_rotation_matrix(angles, image_height, image_width, name=None):
    """Returns projective transform(s) for the given angle(s).

    Args:
      angles: A scalar angle to rotate all images by, or (for batches of images) a
        vector with an angle to rotate each image in the batch. The rank must be
        statically known (the shape is not `TensorShape(None)`).
      image_height: Height of the image(s) to be transformed.
      image_width: Width of the image(s) to be transformed.
      name: The name of the op.

    Returns:
      A tensor of shape (num_images, 8). Projective transforms which can be given
      to operation `image_projective_transform_v2`. If one row of transforms is
      [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
      `(x, y)` to a transformed *input* point
      `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
      where `k = c0 x + c1 y + 1`.
    """
    with backend.name_scope(name or 'rotation_matrix'):
        # Offsets that re-center the rotation about the image midpoint
        # ((w-1)/2, (h-1)/2) rather than the top-left corner.
        x_offset = ((image_width - 1) -
                    (math_ops.cos(angles) * (image_width - 1) -
                     math_ops.sin(angles) * (image_height - 1))) / 2.0
        y_offset = ((image_height - 1) -
                    (math_ops.sin(angles) * (image_width - 1) +
                     math_ops.cos(angles) * (image_height - 1))) / 2.0
        num_angles = array_ops.shape(angles)[0]
        # Rows are [cos, -sin, x_off, sin, cos, y_off, 0, 0]: a 2x2 rotation,
        # a translation column, and zero perspective terms (k == 1).
        return array_ops.concat(
            values=[
                math_ops.cos(angles)[:, None],
                -math_ops.sin(angles)[:, None],
                x_offset[:, None],
                math_ops.sin(angles)[:, None],
                math_ops.cos(angles)[:, None],
                y_offset[:, None],
                array_ops.zeros((num_angles, 2), dtypes.float32),
            ],
            axis=1)
def get_translation_matrix(translations, name=None):
    """Returns projective transform(s) for the given translation(s).

    Args:
      translations: A matrix of 2-element lists representing [dx, dy] to translate
        for each image (for a batch of images).
      name: The name of the op.

    Returns:
      A tensor of shape (num_images, 8) projective transforms which can be given
      to `transform`.
    """
    with backend.name_scope(name or 'translation_matrix'):
        num_translations = array_ops.shape(translations)[0]
        # Each row encodes the 3x3 matrix
        #   [[1 0 -dx]
        #    [0 1 -dy]
        #    [0 0   1]]
        # flattened to 8 values (the final 1 is implicit). Translation
        # matrices are always float32.
        one_column = array_ops.ones((num_translations, 1), dtypes.float32)
        zero_column = array_ops.zeros((num_translations, 1), dtypes.float32)
        row_pieces = [
            one_column,
            zero_column,
            -translations[:, 0, None],
            zero_column,
            one_column,
            -translations[:, 1, None],
            array_ops.zeros((num_translations, 2), dtypes.float32),
        ]
        return array_ops.concat(values=row_pieces, axis=1)
def transform(images,
              transforms,
              fill_mode='reflect',
              fill_value=0.0,
              interpolation='bilinear',
              output_shape=None,
              name=None):
    """Applies the given transform(s) to the image(s).

    Args:
      images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
        (NHWC), (num_rows, num_columns, num_channels) (HWC), or (num_rows,
        num_columns) (HW). The rank must be statically known (the shape is not
        `TensorShape(None)`.
      transforms: Projective transform matrix/matrices. A vector of length 8 or
        tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1, b2,
        c0, c1], then it maps the *output* point `(x, y)` to a transformed *input*
        point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
        `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the
        transform mapping input points to output points. Note that gradients are
        not backpropagated into transformation parameters.
      fill_mode: Points outside the boundaries of the input are filled according
        to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
      fill_value: a float represents the value to be filled outside the boundaries
        when `fill_mode` is "constant".
      interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
      output_shape: Output dimesion after the transform, [height, width]. If None,
        output is the same size as input image.
      name: The name of the op. ## Fill mode.
        Behavior for each valid value is as follows: reflect (d c b a | a b c d | d c
        b a) The input is extended by reflecting about the edge of the last pixel.
        constant (k k k k | a b c d | k k k k) The input is extended by filling all
        values beyond the edge with the same constant value k = 0. wrap (a b c d |
        a b c d | a b c d) The input is extended by wrapping around to the opposite
        edge. nearest (a a a a | a b c d | d d d d) The input is extended by the
        nearest pixel.

    Input shape:
      4D tensor with shape: `(samples, height, width, channels)`,
      data_format='channels_last'.

    Output shape:
      4D tensor with shape: `(samples, height, width, channels)`,
      data_format='channels_last'.

    Returns:
      Image(s) with the same type and shape as `images`, with the given
      transform(s) applied. Transformed coordinates outside of the input image
      will be filled with zeros.

    Raises:
      TypeError: If `image` is an invalid type.
      ValueError: If output shape is not 1-D int32 Tensor.
    """
    with backend.name_scope(name or 'transform'):
        if output_shape is None:
            # Default to the input spatial size (H, W of an NHWC batch).
            output_shape = array_ops.shape(images)[1:3]
            if not context.executing_eagerly():
                # In graph mode, fold the shape to a static constant when possible.
                output_shape_value = tensor_util.constant_value(output_shape)
                if output_shape_value is not None:
                    output_shape = output_shape_value

        output_shape = ops.convert_to_tensor_v2_with_dispatch(
            output_shape, dtypes.int32, name='output_shape')

        if not output_shape.get_shape().is_compatible_with([2]):
            raise ValueError('output_shape must be a 1-D Tensor of 2 elements: '
                             'new_height, new_width, instead got '
                             '{}'.format(output_shape))

        fill_value = ops.convert_to_tensor_v2_with_dispatch(
            fill_value, dtypes.float32, name='fill_value')

        # The V3 raw op (with fill_value support) only exists on runtimes newer
        # than 2020-08-05; otherwise fall back to V2, which ignores fill_value.
        if compat.forward_compatible(2020, 8, 5):
            return gen_image_ops.ImageProjectiveTransformV3(
                images=images,
                output_shape=output_shape,
                fill_value=fill_value,
                transforms=transforms,
                fill_mode=fill_mode.upper(),
                interpolation=interpolation.upper())

        return gen_image_ops.ImageProjectiveTransformV2(
            images=images,
            output_shape=output_shape,
            transforms=transforms,
            fill_mode=fill_mode.upper(),
            interpolation=interpolation.upper())
def make_generator(seed=None):
    """Creates a random generator.

    Args:
      seed: the seed to initialize the generator. If None, the generator will be
        initialized non-deterministically.

    Returns:
      A `stateful_random_ops.Generator` object.
    """
    # FIX: the original tested `if seed:`, so a caller passing seed=0 (falsy)
    # silently got a non-deterministic generator. Test for None explicitly so
    # every integer seed, including 0, is honoured deterministically.
    if seed is not None:
        return stateful_random_ops.Generator.from_seed(seed)
    return stateful_random_ops.Generator.from_non_deterministic_state()
# Flip-mode names accepted by `RandomFlip_prob` (mirrors the stock
# `RandomFlip` layer's vocabulary).
HORIZONTAL = 'horizontal'
VERTICAL = 'vertical'
HORIZONTAL_AND_VERTICAL = 'horizontal_and_vertical'
class RandomFlip_prob(RandomFlip):
    """`RandomFlip` with a probability gate.

    With probability `p` the whole batch is returned untouched; otherwise the
    configured random flips are applied (i.e. augmentation fires with
    probability ``1 - p``). All other behaviour matches the stock layer.
    """

    def __init__(self, mode=HORIZONTAL_AND_VERTICAL, seed=None, p=0.5, **kwargs):
        # Intentionally skip RandomFlip.__init__ and initialise the layer base
        # class directly, because this __init__ re-implements the parent's
        # setup with the extra `p` attribute.
        super(RandomFlip, self).__init__(**kwargs)
        base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomFlip').set(True)
        self.mode = mode
        self.p = p  # probability of skipping the flip entirely
        if mode == HORIZONTAL:
            self.horizontal = True
            self.vertical = False
        elif mode == VERTICAL:
            self.horizontal = False
            self.vertical = True
        elif mode == HORIZONTAL_AND_VERTICAL:
            self.horizontal = True
            self.vertical = True
        else:
            raise ValueError('RandomFlip layer {name} received an unknown mode '
                             'argument {arg}'.format(name=self.name, arg=mode))
        self.seed = seed
        self._rng = make_generator(self.seed)
        self.input_spec = InputSpec(ndim=4)

    def call(self, inputs, training=True):
        if training is None:
            training = backend.learning_phase()

        def random_flipped_inputs():
            flipped_outputs = inputs
            # Probability gate: with chance `p`, return the batch unchanged.
            if tf.random.uniform([]) < self.p:
                return flipped_outputs
            if self.horizontal:
                flipped_outputs = image_ops.random_flip_left_right(
                    flipped_outputs, self.seed)
            if self.vertical:
                flipped_outputs = image_ops.random_flip_up_down(
                    flipped_outputs, self.seed)
            return flipped_outputs

        output = control_flow_util.smart_cond(training, random_flipped_inputs,
                                              lambda: inputs)
        output.set_shape(inputs.shape)
        return output

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        # FIX: serialize `p` as well; the original omitted it, so a layer
        # rebuilt via get_config()/from_config() silently lost its
        # probability gate (reverting to the default 0.5).
        config = {
            'mode': self.mode,
            'seed': self.seed,
            'p': self.p,
        }
        base_config = super(RandomFlip, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RandomTranslation_prob(RandomTranslation):
    """`RandomTranslation` with a probability gate.

    With probability `p` the batch is returned unchanged; otherwise each image
    is translated by a per-image random [dx, dy] drawn from the configured
    height/width factor ranges (fractions of the image size).
    """

    def __init__(self,
                 height_factor,
                 width_factor,
                 fill_mode='reflect',
                 interpolation='bilinear',
                 seed=None,
                 p=0.5,
                 fill_value=0.0,
                 **kwargs):
        self.height_factor = height_factor
        self.p = p  # probability of skipping the translation entirely
        # A scalar factor f means the symmetric range [-f, f].
        if isinstance(height_factor, (tuple, list)):
            self.height_lower = height_factor[0]
            self.height_upper = height_factor[1]
        else:
            self.height_lower = -height_factor
            self.height_upper = height_factor
        if self.height_upper < self.height_lower:
            raise ValueError('`height_factor` cannot have upper bound less than '
                             'lower bound, got {}'.format(height_factor))
        if abs(self.height_lower) > 1. or abs(self.height_upper) > 1.:
            raise ValueError('`height_factor` must have values between [-1, 1], '
                             'got {}'.format(height_factor))

        self.width_factor = width_factor
        if isinstance(width_factor, (tuple, list)):
            self.width_lower = width_factor[0]
            self.width_upper = width_factor[1]
        else:
            self.width_lower = -width_factor
            self.width_upper = width_factor
        if self.width_upper < self.width_lower:
            raise ValueError('`width_factor` cannot have upper bound less than '
                             'lower bound, got {}'.format(width_factor))
        if abs(self.width_lower) > 1. or abs(self.width_upper) > 1.:
            raise ValueError('`width_factor` must have values between [-1, 1], '
                             'got {}'.format(width_factor))

        check_fill_mode_and_interpolation(fill_mode, interpolation)

        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.interpolation = interpolation
        self.seed = seed
        self._rng = make_generator(self.seed)
        self.input_spec = InputSpec(ndim=4)
        # Skip RandomTranslation.__init__ (this method replaces it) and
        # initialise the layer base class directly.
        super(RandomTranslation, self).__init__(**kwargs)
        base_preprocessing_layer.keras_kpl_gauge.get_cell(
            'RandomTranslation').set(True)

    def call(self, inputs, training=True):
        if training is None:
            training = backend.learning_phase()

        def random_translated_inputs():
            """Translated inputs with random ops."""
            # Probability gate: with chance `p`, return the batch unchanged.
            if tf.random.uniform([]) < self.p:
                return inputs
            inputs_shape = array_ops.shape(inputs)
            batch_size = inputs_shape[0]
            h_axis, w_axis = H_AXIS, W_AXIS
            img_hd = math_ops.cast(inputs_shape[h_axis], dtypes.float32)
            img_wd = math_ops.cast(inputs_shape[w_axis], dtypes.float32)
            # Per-image offsets, drawn as fractions then scaled to pixels.
            height_translate = self._rng.uniform(
                shape=[batch_size, 1],
                minval=self.height_lower,
                maxval=self.height_upper,
                dtype=dtypes.float32)
            height_translate = height_translate * img_hd
            width_translate = self._rng.uniform(
                shape=[batch_size, 1],
                minval=self.width_lower,
                maxval=self.width_upper,
                dtype=dtypes.float32)
            width_translate = width_translate * img_wd
            translations = math_ops.cast(
                array_ops.concat([width_translate, height_translate], axis=1),
                dtype=dtypes.float32)
            return transform(
                inputs,
                get_translation_matrix(translations),
                interpolation=self.interpolation,
                fill_mode=self.fill_mode,
                fill_value=self.fill_value)

        output = control_flow_util.smart_cond(training, random_translated_inputs,
                                              lambda: inputs)
        output.set_shape(inputs.shape)
        return output

    def get_config(self):
        # FIX: the inherited RandomTranslation.get_config does not know about
        # `p`, so serialization silently dropped the probability gate. Add it.
        config = super(RandomTranslation_prob, self).get_config()
        config['p'] = self.p
        return config
class RandomRotation_prob(RandomRotation):
    """`RandomRotation` with a probability gate.

    With probability `p` the batch is returned unchanged; otherwise each image
    is rotated by a per-image random angle drawn from the configured factor
    range (factors are fractions of a full 2*pi turn).
    """

    def __init__(self,
                 factor,
                 fill_mode='reflect',
                 interpolation='bilinear',
                 seed=None,
                 p=0.5,
                 fill_value=0.0,
                 **kwargs):
        self.factor = factor
        self.p = p  # probability of skipping the rotation entirely
        # A scalar factor f means the symmetric range [-f, f].
        if isinstance(factor, (tuple, list)):
            self.lower = factor[0]
            self.upper = factor[1]
        else:
            self.lower = -factor
            self.upper = factor
        if self.upper < self.lower:
            raise ValueError('Factor cannot have negative values, '
                             'got {}'.format(factor))
        check_fill_mode_and_interpolation(fill_mode, interpolation)
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.interpolation = interpolation
        self.seed = seed
        self._rng = make_generator(self.seed)
        self.input_spec = InputSpec(ndim=4)
        # Skip RandomRotation.__init__ (this method replaces it) and
        # initialise the layer base class directly.
        super(RandomRotation, self).__init__(**kwargs)
        base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomRotation').set(True)

    def call(self, inputs, training=True):
        if training is None:
            training = backend.learning_phase()

        def random_rotated_inputs():
            """Rotated inputs with random ops."""
            # Probability gate: with chance `p`, return the batch unchanged.
            if tf.random.uniform([]) < self.p:
                return inputs
            inputs_shape = array_ops.shape(inputs)
            batch_size = inputs_shape[0]
            img_hd = math_ops.cast(inputs_shape[H_AXIS], dtypes.float32)
            img_wd = math_ops.cast(inputs_shape[W_AXIS], dtypes.float32)
            # Factors are fractions of a full rotation: angle = factor * 2*pi.
            min_angle = self.lower * 2. * np.pi
            max_angle = self.upper * 2. * np.pi
            angles = self._rng.uniform(
                shape=[batch_size], minval=min_angle, maxval=max_angle)
            return transform(
                inputs,
                get_rotation_matrix(angles, img_hd, img_wd),
                fill_mode=self.fill_mode,
                fill_value=self.fill_value,
                interpolation=self.interpolation)

        output = control_flow_util.smart_cond(training, random_rotated_inputs,
                                              lambda: inputs)
        output.set_shape(inputs.shape)
        return output

    def get_config(self):
        # FIX: the inherited RandomRotation.get_config does not know about
        # `p`, so serialization silently dropped the probability gate. Add it.
        config = super(RandomRotation_prob, self).get_config()
        config['p'] = self.p
        return config
CV_Classification/feature_extraction.py
0 → 100644
浏览文件 @
79ee1180
import
numpy
as
np
import
os
import
PIL
import
PIL.Image
import
tensorflow
as
tf
import
tensorflow_datasets
as
tfds
import
datetime
from
tensorflow.keras.callbacks
import
ReduceLROnPlateau
from
tensorflow.python.keras
import
backend
from
tensorflow.python.platform
import
tf_logging
as
logging
from
custom_augmentation
import
*
import
pathlib
import
argparse
# CLI: `--key` selects which entry of `augmentation_dict` is used for training.
parser = argparse.ArgumentParser()
parser.add_argument("--key", type=str)
args = parser.parse_args()

# Training hyper-parameters and input geometry.
batch_size = 128
img_height = 180
img_width = 180
img_size = (img_height, img_width, 3)  # HWC shape fed to MobileNetV2

# Maps a strategy name to a ready-made Keras preprocessing layer. The `*_prob`
# entries come from custom_augmentation (star import above) and add a
# probability gate on top of the corresponding stock layers.
augmentation_dict = {
    'RandomFlip': tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
    'RandomRotation': tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
    'RandomContrast': tf.keras.layers.experimental.preprocessing.RandomContrast(0.2),
    'RandomZoom': tf.keras.layers.experimental.preprocessing.RandomZoom(height_factor=0.1, width_factor=0.1),
    'RandomTranslation': tf.keras.layers.experimental.preprocessing.RandomTranslation(height_factor=0.1, width_factor=0.1),
    'RandomCrop': tf.keras.layers.experimental.preprocessing.RandomCrop(img_height, img_width),
    'RandomFlip_prob': RandomFlip_prob("horizontal_and_vertical"),
    'RandomRotation_prob': RandomRotation_prob(0.2),
    'RandomTranslation_prob': RandomTranslation_prob(height_factor=0.1, width_factor=0.1),
}
# Download (cached by Keras after the first run) and index the flowers dataset.
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file(origin=dataset_url, fname='flower_photos', untar=True)
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)

# 80/20 train/validation split; the shared seed keeps the two subsets disjoint.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)

AUTOTUNE = tf.data.AUTOTUNE
# NOTE(review): `.shuffle(...).cache()` caches the already-shuffled stream, so
# every epoch replays the same order — confirm whether `.cache().shuffle(...)`
# (per-epoch reshuffling) was intended.
train_ds = train_ds.shuffle(buffer_size=1000).cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
num_classes = 5  # flower_photos has five class folders

# Single-layer augmentation pipeline chosen via --key.
data_augmentation = tf.keras.Sequential([
    augmentation_dict[args.key],
])

# Feature extraction: frozen ImageNet MobileNetV2 backbone + new classifier head.
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
base_model = tf.keras.applications.MobileNetV2(input_shape=img_size,
                                               include_top=False,
                                               weights='imagenet')
base_model.trainable = False  # only the new head is trained in this stage

inputs = tf.keras.Input(shape=img_size)
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)  # keep BatchNorm statistics frozen
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(num_classes)(x)  # raw logits (no softmax)
model = tf.keras.Model(inputs, outputs)
print(model.summary())

optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# from_logits=True because the Dense head has no activation.
model.compile(optimizer=optimizer,
              loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# TensorBoard logging: one run directory per augmentation key + timestamp.
log_dir = "logs/fit_2/mobilenetv2_" + str(args.key) + '_' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
file_writer = tf.summary.create_file_writer(log_dir + '/lr')
file_writer.set_as_default()  # default writer used by MyCallback's tf.summary.scalar
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                              min_delta=0.001,
                                              patience=5,
                                              restore_best_weights=True)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                      histogram_freq=1)
class MyCallback(ReduceLROnPlateau):
    """ReduceLROnPlateau that also records the learning rate.

    Re-implements `on_epoch_end` from the stock callback with two additions:
    the current LR is written into `logs['lr']` every epoch, and each LR
    reduction is logged via `tf.summary.scalar` to the default summary writer
    set up at module level.
    """

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Expose the current learning rate to other callbacks / History.
        logs['lr'] = backend.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            logging.warning('Learning rate reduction is conditioned on metric `%s` '
                            'which is not available. Available metrics are: %s',
                            self.monitor, ','.join(list(logs.keys())))
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0
            if self.monitor_op(current, self.best):
                # New best value: reset the plateau counter.
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                self.wait += 1
                if self.wait >= self.patience:
                    old_lr = backend.get_value(self.model.optimizer.lr)
                    if old_lr > np.float32(self.min_lr):
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        # Addition vs. stock callback: log the reduced LR.
                        tf.summary.scalar('learning rate', data=new_lr, step=epoch)
                        backend.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                                  'rate to %s.' % (epoch + 1, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
# Shrink the LR by 5x when val_loss plateaus for 3 epochs (floor 1e-6); the
# EarlyStopping callback above usually ends training well before 100 epochs.
reduce_lr = MyCallback(monitor='val_loss', factor=0.2, patience=3, min_lr=1e-6)
model.fit(train_ds,
          validation_data=val_ds,
          epochs=100,
          callbacks=[reduce_lr, early_stop, tensorboard_callback],
          verbose=2)
print(model.evaluate(val_ds))
CV_Classification/finetune.py
0 → 100644
浏览文件 @
79ee1180
import
numpy
as
np
import
os
import
PIL
import
PIL.Image
import
tensorflow
as
tf
import
tensorflow_datasets
as
tfds
import
datetime
from
tensorflow.keras.callbacks
import
ReduceLROnPlateau
from
tensorflow.python.keras
import
backend
from
tensorflow.python.platform
import
tf_logging
as
logging
from
custom_augmentation
import
*
from
tensorflow.keras
import
backend
as
K
import
pathlib
import
argparse
# CLI: `--key` selects which entry of `augmentation_dict` is used for training.
parser = argparse.ArgumentParser()
parser.add_argument("--key", type=str)
args = parser.parse_args()

# Training hyper-parameters and input geometry (same as feature_extraction.py).
batch_size = 128
img_height = 180
img_width = 180
img_size = (img_height, img_width, 3)  # HWC shape fed to MobileNetV2

# Maps a strategy name to a ready-made Keras preprocessing layer. The `*_prob`
# entries come from custom_augmentation (star import above) and add a
# probability gate on top of the corresponding stock layers.
augmentation_dict = {
    'RandomFlip': tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
    'RandomRotation': tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
    'RandomContrast': tf.keras.layers.experimental.preprocessing.RandomContrast(0.2),
    'RandomZoom': tf.keras.layers.experimental.preprocessing.RandomZoom(height_factor=0.1, width_factor=0.1),
    'RandomTranslation': tf.keras.layers.experimental.preprocessing.RandomTranslation(height_factor=0.1, width_factor=0.1),
    'RandomCrop': tf.keras.layers.experimental.preprocessing.RandomCrop(img_height, img_width),
    'RandomFlip_prob': RandomFlip_prob("horizontal_and_vertical"),
    'RandomRotation_prob': RandomRotation_prob(0.2),
    'RandomTranslation_prob': RandomTranslation_prob(height_factor=0.1, width_factor=0.1),
}
# Download (cached by Keras after the first run) and index the flowers dataset.
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file(origin=dataset_url, fname='flower_photos', untar=True)
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)

# 80/20 train/validation split; the shared seed keeps the two subsets disjoint.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)

AUTOTUNE = tf.data.AUTOTUNE
# NOTE(review): `.shuffle(...).cache()` caches the already-shuffled stream, so
# every epoch replays the same order — confirm whether `.cache().shuffle(...)`
# (per-epoch reshuffling) was intended.
train_ds = train_ds.shuffle(buffer_size=1000).cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
num_classes = 5  # flower_photos has five class folders

# Single-layer augmentation pipeline chosen via --key.
data_augmentation = tf.keras.Sequential([
    augmentation_dict[args.key],
])

# Fine-tuning stage: unfreeze the top of the MobileNetV2 backbone.
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
base_model = tf.keras.applications.MobileNetV2(input_shape=img_size,
                                               include_top=False,
                                               weights='imagenet')
base_model.trainable = True

# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))

# Fine-tune from this layer onwards
fine_tune_at = 100

# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False

inputs = tf.keras.Input(shape=img_size)
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)  # keep BatchNorm statistics frozen
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(num_classes)(x)  # raw logits (no softmax)
model = tf.keras.Model(inputs, outputs)
# NOTE(review): restores weights from a prior (feature-extraction) run;
# assumes a checkpoint exists at ./save_models — confirm it is produced by
# another script before launching fine-tuning.
model.load_weights('./save_models')
print(model.summary())
# Much lower LR (1e-4) for fine-tuning to avoid destroying pretrained features.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
# from_logits=True because the Dense head has no activation.
model.compile(optimizer=optimizer,
              loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# NOTE(review): this looks redundant with the Adam(learning_rate=1e-4) above —
# confirm whether it guards against an LR restored by load_weights.
K.set_value(model.optimizer.learning_rate, 1e-4)

# TensorBoard logging: one run directory per augmentation key + timestamp.
log_dir = "logs/fit_1_finetune/mobilenetv2_" + str(args.key) + '_' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
file_writer = tf.summary.create_file_writer(log_dir + '/lr')
file_writer.set_as_default()  # default writer used by MyCallback's tf.summary.scalar
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                              min_delta=0.001,
                                              patience=5,
                                              restore_best_weights=True)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                      histogram_freq=1)
class MyCallback(ReduceLROnPlateau):
    """ReduceLROnPlateau that also records the learning rate.

    Duplicate of the callback in feature_extraction.py: re-implements
    `on_epoch_end`, writing the current LR into `logs['lr']` and logging each
    LR reduction via `tf.summary.scalar` to the default summary writer.
    """

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Expose the current learning rate to other callbacks / History.
        logs['lr'] = backend.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            logging.warning('Learning rate reduction is conditioned on metric `%s` '
                            'which is not available. Available metrics are: %s',
                            self.monitor, ','.join(list(logs.keys())))
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0
            if self.monitor_op(current, self.best):
                # New best value: reset the plateau counter.
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                self.wait += 1
                if self.wait >= self.patience:
                    old_lr = backend.get_value(self.model.optimizer.lr)
                    if old_lr > np.float32(self.min_lr):
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        # Addition vs. stock callback: log the reduced LR.
                        tf.summary.scalar('learning rate', data=new_lr, step=epoch)
                        backend.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                                  'rate to %s.' % (epoch + 1, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
# Shrink the LR by 5x when val_loss plateaus for 3 epochs (floor 1e-6); the
# EarlyStopping callback above usually ends training well before 100 epochs.
reduce_lr = MyCallback(monitor='val_loss', factor=0.2, patience=3, min_lr=1e-6)
model.fit(train_ds,
          validation_data=val_ds,
          epochs=100,
          callbacks=[reduce_lr, early_stop, tensorboard_callback],
          verbose=2)
print(model.evaluate(val_ds))
CV_Classification/run.sh
0 → 100644
浏览文件 @
79ee1180
#!/bin/bash
# Run the classification experiment once per augmentation strategy; each run
# selects its strategy via the --key flag consumed by the Python scripts.
listVar="RandomFlip RandomRotation RandomContrast RandomZoom RandomTranslation RandomCrop RandomFlip_prob RandomRotation_prob RandomTranslation_prob"
# $listVar is intentionally unquoted so the shell word-splits it into items.
for i in $listVar; do
    echo "$i"
    # NOTE(review): the target script is literally named `3.py` here — confirm
    # whether this should be feature_extraction.py or finetune.py.
    python 3.py --key "$i"
done
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录