MindSpore / mindarmour
Commit e585e2b0
Authored on May 16, 2020 by mindspore-ci-bot; committed via Gitee on May 16, 2020.
!19 Remove redundant code of files under mindarmour/example/mnist_demo/
Merge pull request !19 from jxlang910/master
Parents: b7ef2e0b, 4c4c34ed
Showing 19 changed files with 71 additions and 1235 deletions (+71, -1235)
example/mnist_demo/lenet5_mnist_coverage.py         +2   -1
example/mnist_demo/lenet5_mnist_fuzzing.py          +2   -1
example/mnist_demo/mnist_attack_cw.py               +3   -81
example/mnist_demo/mnist_attack_deepfool.py         +3   -82
example/mnist_demo/mnist_attack_fgsm.py             +3   -80
example/mnist_demo/mnist_attack_genetic.py          +3   -88
example/mnist_demo/mnist_attack_hsja.py             +3   -82
example/mnist_demo/mnist_attack_jsma.py             +3   -86
example/mnist_demo/mnist_attack_lbfgs.py            +3   -94
example/mnist_demo/mnist_attack_mdi2fgsm.py         +4   -78
example/mnist_demo/mnist_attack_nes.py              +3   -90
example/mnist_demo/mnist_attack_pgd.py              +3   -80
example/mnist_demo/mnist_attack_pointwise.py        +3   -86
example/mnist_demo/mnist_attack_pso.py              +3   -81
example/mnist_demo/mnist_attack_salt_and_pepper.py  +3   -90
example/mnist_demo/mnist_defense_nad.py             +3   -105
example/mnist_demo/mnist_evaluation.py              +17  -16
example/mnist_demo/mnist_similarity_detector.py     +5   -8
example/mnist_demo/mnist_train.py                   +2   -6
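The same refactor repeats in every script below: the hard-coded context.set_context(..., device_target="Ascend") call and the pytest-marked Ascend-only test function are dropped, and device selection moves into the __main__ block. A minimal sketch of the resulting entry-point pattern (the run_example name is a placeholder, not part of the diff):

from mindspore import context

def run_example():
    # example body; it now relies on whatever context the caller configured
    ...

if __name__ == '__main__':
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    run_example()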
example/mnist_demo/lenet5_mnist_coverage.py

@@ -33,7 +33,6 @@ LOGGER.set_level('INFO')
def test_lenet_mnist_coverage():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -85,4 +84,6 @@ def test_lenet_mnist_coverage():
if __name__ == '__main__':
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_lenet_mnist_coverage()
example/mnist_demo/lenet5_mnist_fuzzing.py

@@ -32,7 +32,6 @@ LOGGER.set_level('INFO')
def test_lenet_mnist_fuzzing():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -87,4 +86,6 @@ def test_lenet_mnist_fuzzing():
if __name__ == '__main__':
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_lenet_mnist_fuzzing()
example/mnist_demo/mnist_attack_cw.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context

@@ -36,89 +35,10 @@ LOGGER.set_level('INFO')
TAG = 'CW_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_carlini_wagner_attack():
    """
    CW-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    num_classes = 10
    attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
    start_time = time.clock()
    adv_data = attack.batch_generate(np.concatenate(test_images), np.concatenate(test_labels), batch_size=32)
    stop_time = time.clock()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
    test_labels = np.eye(10)[np.concatenate(test_labels)]
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), test_labels, adv_data.transpose(0, 2, 3, 1), pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_carlini_wagner_attack_cpu():
    """
    CW-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -188,4 +108,6 @@ def test_carlini_wagner_attack_cpu():
if __name__ == '__main__':
    test_carlini_wagner_attack_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_carlini_wagner_attack()
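AttackEvaluate is fed NHWC images, one-hot labels, and confidence scores in (0, 1), which is why the script transposes the NCHW batches with transpose(0, 2, 3, 1), one-hot encodes labels with np.eye(10)[labels], and pushes logits through a softmax. A small sketch of those conversions, assuming scipy.special.softmax behaves like the softmax imported by the script:

import numpy as np
from scipy.special import softmax  # assumption: stand-in for the script's softmax

labels = np.array([3, 1, 4])
logits = np.random.randn(3, 10).astype(np.float32)
images_nchw = np.random.rand(3, 1, 32, 32).astype(np.float32)

one_hot = np.eye(10)[labels]                      # shape (3, 10), one 1 per row
probs = softmax(logits, axis=1)                   # confidences rescaled into (0, 1)
images_nhwc = images_nchw.transpose(0, 2, 3, 1)   # (N, C, H, W) -> (N, H, W, C)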
example/mnist_demo/mnist_attack_deepfool.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context

@@ -36,90 +35,10 @@ LOGGER.set_level('INFO')
TAG = 'DeepFool_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_deepfool_attack():
    """
    DeepFool-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    classes = 10
    attack = DeepFool(net, classes, norm_level=2, bounds=(0.0, 1.0))
    start_time = time.clock()
    adv_data = attack.batch_generate(np.concatenate(test_images), np.concatenate(test_labels), batch_size=32)
    stop_time = time.clock()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
    test_labels = np.eye(10)[np.concatenate(test_labels)]
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), test_labels, adv_data.transpose(0, 2, 3, 1), pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_deepfool_attack_cpu():
    """
    DeepFool-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -190,4 +109,6 @@ def test_deepfool_attack_cpu():
if __name__ == '__main__':
    test_deepfool_attack_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_deepfool_attack()
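Each white-box script reports "the average costing time" as (stop_time - start_time) / (batch_num * batch_size), i.e. wall-clock seconds per attacked sample; the time.clock() used here was removed in Python 3.8, so a sketch of the same measurement with time.perf_counter() (an assumption on my part, not what the diff uses):

import time

batch_num, batch_size = 3, 32
start_time = time.perf_counter()
# ... attack.batch_generate(...) would run here ...
stop_time = time.perf_counter()
avg_time_per_sample = (stop_time - start_time) / (batch_num * batch_size)
print(avg_time_per_sample)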
example/mnist_demo/mnist_attack_fgsm.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context

@@ -37,88 +36,10 @@ LOGGER.set_level('INFO')
TAG = 'FGSM_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_fast_gradient_sign_method():
    """
    FGSM-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.argmax(np.concatenate(test_labels), axis=1)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    attack = FastGradientSignMethod(net, eps=0.3)
    start_time = time.clock()
    adv_data = attack.batch_generate(np.concatenate(test_images), np.concatenate(test_labels), batch_size=32)
    stop_time = time.clock()
    np.save('./adv_data', adv_data)
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), np.concatenate(test_labels), adv_data.transpose(0, 2, 3, 1), pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_fast_gradient_sign_method_cpu():
    """
    FGSM-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -188,4 +109,6 @@ def test_fast_gradient_sign_method_cpu():
if __name__ == '__main__':
    test_fast_gradient_sign_method_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_fast_gradient_sign_method()
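np.save('./adv_data', adv_data) writes the adversarial batch to ./adv_data.npy (NumPy appends the extension when it is missing). A minimal sketch of reloading it for later evaluation:

import numpy as np

adv_data = np.load('./adv_data.npy')   # array saved by the FGSM/PGD scripts
print(adv_data.shape, adv_data.dtype)  # e.g. (96, 1, 32, 32) float32 for 3 batches of 32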
example/mnist_demo/mnist_attack_genetic.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

@@ -49,96 +48,10 @@ class ModelToBeAttacked(BlackModel):
        return result.asnumpy()


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_genetic_attack_on_mnist():
    """
    Genetic-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = ModelToBeAttacked(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(images), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)

    # attacking
    attack = GeneticAttack(model=model, pop_size=6, mutation_rate=0.05, per_bounds=0.1, step_size=0.25, temp=0.1, sparse=True)
    targeted_labels = np.random.randint(0, 10, size=len(true_labels))
    for i, true_l in enumerate(true_labels):
        if targeted_labels[i] == true_l:
            targeted_labels[i] = (targeted_labels[i] + 1) % 10
    start_time = time.clock()
    success_list, adv_data, query_list = attack.generate(np.concatenate(test_images), targeted_labels)
    stop_time = time.clock()
    LOGGER.info(TAG, 'success_list: %s', success_list)
    LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list))
    pred_logits_adv = model.predict(adv_data)
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_lables_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_lables_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %g", accuracy_adv)
    test_labels_onehot = np.eye(10)[true_labels]
    attack_evaluate = AttackEvaluate(np.concatenate(test_images), test_labels_onehot, adv_data, pred_logits_adv, targeted=True, target_label=targeted_labels)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_genetic_attack_on_mnist_cpu():
    """
    Genetic-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -215,4 +128,6 @@ def test_genetic_attack_on_mnist_cpu():
if __name__ == '__main__':
    test_genetic_attack_on_mnist_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_genetic_attack_on_mnist()
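The targeted attacks (genetic, JSMA, LBFGS, and optionally pointwise / salt-and-pepper) all draw a random target class and bump it by one modulo 10 whenever it collides with the true class. The same logic factored into a helper, as a sketch (the function name is illustrative, not from the diff):

import numpy as np

def make_targeted_labels(true_labels, num_classes=10):
    """Random target class per sample, guaranteed to differ from the true class."""
    targeted = np.random.randint(0, num_classes, size=len(true_labels))
    for i, true_l in enumerate(true_labels):
        if targeted[i] == true_l:
            targeted[i] = (targeted[i] + 1) % num_classes
    return targeted

targeted_labels = make_targeted_labels(np.array([0, 7, 7, 3]))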
example/mnist_demo/mnist_attack_hsja.py

@@ -14,7 +14,6 @@
import sys
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

@@ -68,90 +67,10 @@ def create_target_images(dataset, data_labels, target_labels):
    return np.array(res)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_hsja_mnist_attack():
    """
    hsja-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)
    net.set_train(False)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = ModelToBeAttacked(net)
    batch_num = 5  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(images), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)
    test_images = np.concatenate(test_images)

    # attacking
    norm = 'l2'
    search = 'grid_search'
    target = False
    attack = HopSkipJumpAttack(model, constraint=norm, stepsize_search=search)
    if target:
        target_labels = random_target_labels(true_labels)
        target_images = create_target_images(test_images, predict_labels, target_labels)
        attack.set_target_images(target_images)
        success_list, adv_data, _ = attack.generate(test_images, target_labels)
    else:
        success_list, adv_data, _ = attack.generate(test_images, None)

    adv_datas = []
    gts = []
    for success, adv, gt in zip(success_list, adv_data, true_labels):
        if success:
            adv_datas.append(adv)
            gts.append(gt)
    if gts:
        adv_datas = np.concatenate(np.asarray(adv_datas), axis=0)
        gts = np.asarray(gts)
        pred_logits_adv = model.predict(adv_datas)
        pred_lables_adv = np.argmax(pred_logits_adv, axis=1)
        accuracy_adv = np.mean(np.equal(pred_lables_adv, gts))
        mis_rate = (1 - accuracy_adv)*(len(adv_datas) / len(success_list))
        LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', mis_rate)


def test_hsja_mnist_attack_cpu():
    """
    hsja-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -220,4 +139,6 @@ def test_hsja_mnist_attack_cpu():
if __name__ == '__main__':
    test_hsja_mnist_attack_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_hsja_mnist_attack()
example/mnist_demo/mnist_attack_jsma.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context

@@ -38,94 +37,10 @@ LOGGER.set_level('INFO')
TAG = 'JSMA_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_jsma_attack():
    """
    JSMA-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    targeted_labels = np.random.randint(0, 10, size=len(true_labels))
    for i, true_l in enumerate(true_labels):
        if targeted_labels[i] == true_l:
            targeted_labels[i] = (targeted_labels[i] + 1) % 10
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)

    # attacking
    classes = 10
    attack = JSMAAttack(net, classes)
    start_time = time.clock()
    adv_data = attack.batch_generate(np.concatenate(test_images), targeted_labels, batch_size=32)
    stop_time = time.clock()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_lables_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_lables_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %g", accuracy_adv)
    test_labels = np.eye(10)[np.concatenate(test_labels)]
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), test_labels, adv_data.transpose(0, 2, 3, 1), pred_logits_adv, targeted=True, target_label=targeted_labels)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_jsma_attack_cpu():
    """
    JSMA-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -200,4 +115,6 @@ def test_jsma_attack_cpu():
if __name__ == '__main__':
    test_jsma_attack_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_jsma_attack()
example/mnist_demo/mnist_attack_lbfgs.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context

@@ -37,102 +36,10 @@ LOGGER.set_level('INFO')
TAG = 'LBFGS_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_lbfgs_attack():
    """
    LBFGS-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size, sparse=False)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.argmax(np.concatenate(test_labels), axis=1)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    is_targeted = True
    if is_targeted:
        targeted_labels = np.random.randint(0, 10, size=len(true_labels)).astype(np.int32)
        for i, true_l in enumerate(true_labels):
            if targeted_labels[i] == true_l:
                targeted_labels[i] = (targeted_labels[i] + 1) % 10
    else:
        targeted_labels = true_labels.astype(np.int32)
    targeted_labels = np.eye(10)[targeted_labels].astype(np.float32)
    attack = LBFGS(net, is_targeted=is_targeted)
    start_time = time.clock()
    adv_data = attack.batch_generate(np.concatenate(test_images), targeted_labels, batch_size=batch_size)
    stop_time = time.clock()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), np.concatenate(test_labels), adv_data.transpose(0, 2, 3, 1), pred_logits_adv, targeted=is_targeted, target_label=np.argmax(targeted_labels, axis=1))
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_lbfgs_attack_cpu():
    """
    LBFGS-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -214,4 +121,6 @@ def test_lbfgs_attack_cpu():
if __name__ == '__main__':
    test_lbfgs_attack_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_lbfgs_attack()
example/mnist_demo/mnist_attack_mdi2fgsm.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context

@@ -37,83 +36,8 @@ LOGGER = LogUtil.get_instance()
TAG = 'M_DI2_FGSM_Test'
LOGGER.set_level('INFO')


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_momentum_diverse_input_iterative_method():
    """
    M-DI2-FGSM Attack Test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 32  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.argmax(np.concatenate(test_labels), axis=1)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    attack = MomentumDiverseInputIterativeMethod(net)
    start_time = time.clock()
    adv_data = attack.batch_generate(np.concatenate(test_images), np.concatenate(test_labels), batch_size=32)
    stop_time = time.clock()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), np.concatenate(test_labels), adv_data.transpose(0, 2, 3, 1), pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_momentum_diverse_input_iterative_method_cpu():
def test_momentum_diverse_input_iterative_method():
    """
    M-DI2-FGSM Attack Test for CPU device.
    """

@@ -186,4 +110,6 @@ def test_momentum_diverse_input_iterative_method_cpu():
if __name__ == '__main__':
    test_momentum_diverse_input_iterative_method_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_momentum_diverse_input_iterative_method()
example/mnist_demo/mnist_attack_nes.py

@@ -14,7 +14,6 @@
import sys
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

@@ -78,98 +77,10 @@ def create_target_images(dataset, data_labels, target_labels):
    return np.array(res)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nes_mnist_attack():
    """
    hsja-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)
    net.set_train(False)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = ModelToBeAttacked(net)
    # the number of batches of attacking samples
    batch_num = 5
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(images), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)
    test_images = np.concatenate(test_images)

    # attacking
    scene = 'Query_Limit'
    if scene == 'Query_Limit':
        top_k = -1
    elif scene == 'Partial_Info':
        top_k = 5
    elif scene == 'Label_Only':
        top_k = 5
    success = 0
    queries_num = 0
    nes_instance = NES(model, scene, top_k=top_k)
    test_length = 32
    advs = []
    for img_index in range(test_length):
        # Initial image and class selection
        initial_img = test_images[img_index]
        orig_class = true_labels[img_index]
        initial_img = [initial_img]
        target_class = random_target_labels([orig_class], true_labels)
        target_image = create_target_images(test_images, true_labels, target_class)
        nes_instance.set_target_images(target_image)
        tag, adv, queries = nes_instance.generate(initial_img, target_class)
        if tag[0]:
            success += 1
        queries_num += queries[0]
        advs.append(adv)

    advs = np.reshape(advs, (len(advs), 1, 32, 32))
    adv_pred = np.argmax(model.predict(advs), axis=1)
    adv_accuracy = np.mean(np.equal(adv_pred, true_labels[:test_length]))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", adv_accuracy)


def test_nes_mnist_attack_cpu():
    """
    hsja-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -246,4 +157,6 @@ def test_nes_mnist_attack_cpu():
if __name__ == '__main__':
    test_nes_mnist_attack_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_nes_mnist_attack()
example/mnist_demo/mnist_attack_pgd.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context

@@ -37,88 +36,10 @@ LOGGER.set_level('INFO')
TAG = 'PGD_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_projected_gradient_descent_method():
    """
    PGD-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 32  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.argmax(np.concatenate(test_labels), axis=1)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    attack = ProjectedGradientDescent(net, eps=0.3)
    start_time = time.clock()
    adv_data = attack.batch_generate(np.concatenate(test_images), np.concatenate(test_labels), batch_size=32)
    stop_time = time.clock()
    np.save('./adv_data', adv_data)
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), np.concatenate(test_labels), adv_data.transpose(0, 2, 3, 1), pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_projected_gradient_descent_method_cpu():
    """
    PGD-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -188,4 +109,6 @@ def test_projected_gradient_descent_method_cpu():
if __name__ == '__main__':
    test_projected_gradient_descent_method_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_projected_gradient_descent_method()
浏览文件 @
e585e2b0
...
...
@@ -14,7 +14,6 @@
import
sys
import
numpy
as
np
import
pytest
from
mindspore
import
Tensor
from
mindspore
import
context
from
mindspore.train.serialization
import
load_checkpoint
,
load_param_into_net
...
...
@@ -49,94 +48,10 @@ class ModelToBeAttacked(BlackModel):
return
result
.
asnumpy
()
@
pytest
.
mark
.
level1
@
pytest
.
mark
.
platform_arm_ascend_training
@
pytest
.
mark
.
platform_x86_ascend_training
@
pytest
.
mark
.
env_card
@
pytest
.
mark
.
component_mindarmour
def
test_pointwise_attack_on_mnist
():
"""
Salt-and-Pepper-Attack test
"""
context
.
set_context
(
mode
=
context
.
GRAPH_MODE
,
device_target
=
"Ascend"
)
# upload trained network
ckpt_name
=
'./trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net
=
LeNet5
()
load_dict
=
load_checkpoint
(
ckpt_name
)
load_param_into_net
(
net
,
load_dict
)
# get test data
data_list
=
"./MNIST_unzip/test"
batch_size
=
32
ds
=
generate_mnist_dataset
(
data_list
,
batch_size
=
batch_size
)
# prediction accuracy before attack
model
=
ModelToBeAttacked
(
net
)
batch_num
=
3
# the number of batches of attacking samples
test_images
=
[]
test_labels
=
[]
predict_labels
=
[]
i
=
0
for
data
in
ds
.
create_tuple_iterator
():
i
+=
1
images
=
data
[
0
].
astype
(
np
.
float32
)
labels
=
data
[
1
]
test_images
.
append
(
images
)
test_labels
.
append
(
labels
)
pred_labels
=
np
.
argmax
(
model
.
predict
(
images
),
axis
=
1
)
predict_labels
.
append
(
pred_labels
)
if
i
>=
batch_num
:
break
predict_labels
=
np
.
concatenate
(
predict_labels
)
true_labels
=
np
.
concatenate
(
test_labels
)
accuracy
=
np
.
mean
(
np
.
equal
(
predict_labels
,
true_labels
))
LOGGER
.
info
(
TAG
,
"prediction accuracy before attacking is : %g"
,
accuracy
)
# attacking
is_target
=
False
attack
=
PointWiseAttack
(
model
=
model
,
is_targeted
=
is_target
)
if
is_target
:
targeted_labels
=
np
.
random
.
randint
(
0
,
10
,
size
=
len
(
true_labels
))
for
i
,
true_l
in
enumerate
(
true_labels
):
if
targeted_labels
[
i
]
==
true_l
:
targeted_labels
[
i
]
=
(
targeted_labels
[
i
]
+
1
)
%
10
else
:
targeted_labels
=
true_labels
success_list
,
adv_data
,
query_list
=
attack
.
generate
(
np
.
concatenate
(
test_images
),
targeted_labels
)
success_list
=
np
.
arange
(
success_list
.
shape
[
0
])[
success_list
]
LOGGER
.
info
(
TAG
,
'success_list: %s'
,
success_list
)
LOGGER
.
info
(
TAG
,
'average of query times is : %s'
,
np
.
mean
(
query_list
))
adv_preds
=
[]
for
ite_data
in
adv_data
:
pred_logits_adv
=
model
.
predict
(
ite_data
)
# rescale predict confidences into (0, 1).
pred_logits_adv
=
softmax
(
pred_logits_adv
,
axis
=
1
)
adv_preds
.
extend
(
pred_logits_adv
)
accuracy_adv
=
np
.
mean
(
np
.
equal
(
np
.
max
(
adv_preds
,
axis
=
1
),
true_labels
))
LOGGER
.
info
(
TAG
,
"prediction accuracy after attacking is : %g"
,
accuracy_adv
)
test_labels_onehot
=
np
.
eye
(
10
)[
true_labels
]
attack_evaluate
=
AttackEvaluate
(
np
.
concatenate
(
test_images
),
test_labels_onehot
,
adv_data
,
adv_preds
,
targeted
=
is_target
,
target_label
=
targeted_labels
)
LOGGER
.
info
(
TAG
,
'mis-classification rate of adversaries is : %s'
,
attack_evaluate
.
mis_classification_rate
())
LOGGER
.
info
(
TAG
,
'The average confidence of adversarial class is : %s'
,
attack_evaluate
.
avg_conf_adv_class
())
LOGGER
.
info
(
TAG
,
'The average confidence of true class is : %s'
,
attack_evaluate
.
avg_conf_true_class
())
LOGGER
.
info
(
TAG
,
'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s'
,
attack_evaluate
.
avg_lp_distance
())
def
test_pointwise_attack_on_mnist_cpu
():
"""
Salt-and-Pepper-Attack test for CPU device.
"""
context
.
set_context
(
mode
=
context
.
GRAPH_MODE
,
device_target
=
"CPU"
)
# upload trained network
ckpt_name
=
'./trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net
=
LeNet5
()
...
...
@@ -211,4 +126,6 @@ def test_pointwise_attack_on_mnist_cpu():
if
__name__
==
'__main__'
:
test_pointwise_attack_on_mnist_cpu
()
# device_target can be "CPU", "GPU" or "Ascend"
context
.
set_context
(
mode
=
context
.
GRAPH_MODE
,
device_target
=
"CPU"
)
test_pointwise_attack_on_mnist
()
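success_list returned by the black-box attacks is a boolean mask over the attacked samples; np.arange(success_list.shape[0])[success_list] turns it into the indices of the successful ones before logging. A tiny example of that indexing trick:

import numpy as np

success_mask = np.array([True, False, True, True])
success_indices = np.arange(success_mask.shape[0])[success_mask]
print(success_indices)  # [0 2 3]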
example/mnist_demo/mnist_attack_pso.py

@@ -15,7 +15,6 @@ import sys
import time
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

@@ -49,89 +48,10 @@ class ModelToBeAttacked(BlackModel):
        return result.asnumpy()


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pso_attack_on_mnist():
    """
    PSO-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = ModelToBeAttacked(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(images), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=True)
    start_time = time.clock()
    success_list, adv_data, query_list = attack.generate(np.concatenate(test_images), np.concatenate(test_labels))
    stop_time = time.clock()
    LOGGER.info(TAG, 'success_list: %s', success_list)
    LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list))
    pred_logits_adv = model.predict(adv_data)
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
    test_labels_onehot = np.eye(10)[np.concatenate(test_labels)]
    attack_evaluate = AttackEvaluate(np.concatenate(test_images), test_labels_onehot, adv_data, pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s', (stop_time - start_time) / (batch_num*batch_size))


def test_pso_attack_on_mnist_cpu():
    """
    PSO-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -201,4 +121,6 @@ def test_pso_attack_on_mnist_cpu():
if __name__ == '__main__':
    test_pso_attack_on_mnist_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_pso_attack_on_mnist()
example/mnist_demo/mnist_attack_salt_and_pepper.py

@@ -14,7 +14,6 @@
import sys
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

@@ -49,98 +48,10 @@ class ModelToBeAttacked(BlackModel):
        return result.asnumpy()


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_salt_and_pepper_attack_on_mnist():
    """
    Salt-and-Pepper-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = ModelToBeAttacked(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(images), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    LOGGER.debug(TAG, 'model input image shape is: {}'.format(np.array(test_images).shape))
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)

    # attacking
    is_target = False
    attack = SaltAndPepperNoiseAttack(model=model, is_targeted=is_target, sparse=True)
    if is_target:
        targeted_labels = np.random.randint(0, 10, size=len(true_labels))
        for i, true_l in enumerate(true_labels):
            if targeted_labels[i] == true_l:
                targeted_labels[i] = (targeted_labels[i] + 1) % 10
    else:
        targeted_labels = true_labels
    LOGGER.debug(TAG, 'input shape is: {}'.format(np.concatenate(test_images).shape))
    success_list, adv_data, query_list = attack.generate(np.concatenate(test_images), targeted_labels)
    success_list = np.arange(success_list.shape[0])[success_list]
    LOGGER.info(TAG, 'success_list: %s', success_list)
    LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list))
    adv_preds = []
    for ite_data in adv_data:
        pred_logits_adv = model.predict(ite_data)
        # rescale predict confidences into (0, 1).
        pred_logits_adv = softmax(pred_logits_adv, axis=1)
        adv_preds.extend(pred_logits_adv)
    accuracy_adv = np.mean(np.equal(np.max(adv_preds, axis=1), true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %g", accuracy_adv)
    test_labels_onehot = np.eye(10)[true_labels]
    attack_evaluate = AttackEvaluate(np.concatenate(test_images), test_labels_onehot, adv_data, adv_preds, targeted=is_target, target_label=targeted_labels)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s', attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original samples and adversarial samples are: %s', attack_evaluate.avg_lp_distance())


def test_salt_and_pepper_attack_on_mnist_cpu():
    """
    Salt-and-Pepper-Attack test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()

@@ -219,4 +130,6 @@ def test_salt_and_pepper_attack_on_mnist_cpu():
if __name__ == '__main__':
    test_salt_and_pepper_attack_on_mnist_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_salt_and_pepper_attack_on_mnist()
example/mnist_demo/mnist_defense_nad.py
浏览文件 @
e585e2b0
...
...
@@ -12,11 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""defense example using nad"""
import
logging
import
sys
import
numpy
as
np
import
pytest
from
mindspore
import
Tensor
from
mindspore
import
context
from
mindspore
import
nn
...
...
@@ -36,111 +34,10 @@ LOGGER = LogUtil.get_instance()
TAG = 'Nad_Example'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nad_method():
    """
    NAD-Defense test.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # 1. load trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
    opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)

    nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt,
                                    bounds=(0.0, 1.0), eps=0.3)

    # 2. get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds_test = generate_mnist_dataset(data_list, batch_size=batch_size,
                                     sparse=False)
    inputs = []
    labels = []
    for data in ds_test.create_tuple_iterator():
        inputs.append(data[0].astype(np.float32))
        labels.append(data[1])
    inputs = np.concatenate(inputs)
    labels = np.concatenate(labels)

    # 3. get accuracy of test data on original model
    net.set_train(False)
    acc_list = []
    batchs = inputs.shape[0] // batch_size
    for i in range(batchs):
        batch_inputs = inputs[i*batch_size: (i + 1)*batch_size]
        batch_labels = np.argmax(labels[i*batch_size: (i + 1)*batch_size], axis=1)
        logits = net(Tensor(batch_inputs)).asnumpy()
        label_pred = np.argmax(logits, axis=1)
        acc_list.append(np.mean(batch_labels == label_pred))

    LOGGER.debug(TAG, 'accuracy of TEST data on original model is : %s',
                 np.mean(acc_list))

    # 4. get adv of test data
    attack = FastGradientSignMethod(net, eps=0.3)
    adv_data = attack.batch_generate(inputs, labels)
    LOGGER.debug(TAG, 'adv_data.shape is : %s', adv_data.shape)

    # 5. get accuracy of adv data on original model
    net.set_train(False)
    acc_list = []
    batchs = adv_data.shape[0] // batch_size
    for i in range(batchs):
        batch_inputs = adv_data[i*batch_size: (i + 1)*batch_size]
        batch_labels = np.argmax(labels[i*batch_size: (i + 1)*batch_size], axis=1)
        logits = net(Tensor(batch_inputs)).asnumpy()
        label_pred = np.argmax(logits, axis=1)
        acc_list.append(np.mean(batch_labels == label_pred))

    LOGGER.debug(TAG, 'accuracy of adv data on original model is : %s',
                 np.mean(acc_list))

    # 6. defense
    net.set_train()
    nad.batch_defense(inputs, labels, batch_size=32, epochs=10)

    # 7. get accuracy of test data on defensed model
    net.set_train(False)
    acc_list = []
    batchs = inputs.shape[0] // batch_size
    for i in range(batchs):
        batch_inputs = inputs[i*batch_size: (i + 1)*batch_size]
        batch_labels = np.argmax(labels[i*batch_size: (i + 1)*batch_size], axis=1)
        logits = net(Tensor(batch_inputs)).asnumpy()
        label_pred = np.argmax(logits, axis=1)
        acc_list.append(np.mean(batch_labels == label_pred))

    LOGGER.debug(TAG, 'accuracy of TEST data on defensed model is : %s',
                 np.mean(acc_list))

    # 8. get accuracy of adv data on defensed model
    acc_list = []
    batchs = adv_data.shape[0] // batch_size
    for i in range(batchs):
        batch_inputs = adv_data[i*batch_size: (i + 1)*batch_size]
        batch_labels = np.argmax(labels[i*batch_size: (i + 1)*batch_size], axis=1)
        logits = net(Tensor(batch_inputs)).asnumpy()
        label_pred = np.argmax(logits, axis=1)
        acc_list.append(np.mean(batch_labels == label_pred))

    LOGGER.debug(TAG, 'accuracy of adv data on defensed model is : %s',
                 np.mean(acc_list))


def test_nad_method_cpu():
    """
    NAD-Defense test for CPU device.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # 1. load trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
...
...
@@ -231,5 +128,6 @@ def test_nad_method_cpu():
if __name__ == '__main__':
    LOGGER.set_level(logging.DEBUG)
    test_nad_method_cpu()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_nad_method()
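The removed test_nad_method body repeats the same batched-accuracy loop four times (steps 3, 5, 7 and 8). If one wanted to factor that out, a helper along the following lines would reproduce the loop exactly; the function name is invented for this sketch and is not part of the example scripts.

import numpy as np
from mindspore import Tensor

def batched_accuracy(net, data, onehot_labels, batch_size=32):
    """Mean accuracy over full batches, mirroring the loop in steps 3/5/7/8."""
    acc_list = []
    batchs = data.shape[0] // batch_size
    for i in range(batchs):
        batch_inputs = data[i*batch_size: (i + 1)*batch_size]
        batch_labels = np.argmax(onehot_labels[i*batch_size: (i + 1)*batch_size], axis=1)
        logits = net(Tensor(batch_inputs)).asnumpy()
        acc_list.append(np.mean(batch_labels == np.argmax(logits, axis=1)))
    return float(np.mean(acc_list))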
example/mnist_demo/mnist_evaluation.py
View file @ e585e2b0
...
...
@@ -40,7 +40,6 @@ from mindarmour.utils.logger import LogUtil
sys.path.append("..")
from data_processing import generate_mnist_dataset

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

LOGGER = LogUtil.get_instance()
TAG = 'Defense_Evaluate_Example'
...
...
@@ -140,20 +139,18 @@ def test_black_defense():
    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds_test = generate_mnist_dataset(data_list, batch_size=batch_size,
                                     sparse=False)
    ds_test = generate_mnist_dataset(data_list, batch_size=batch_size)
    inputs = []
    labels = []
    for data in ds_test.create_tuple_iterator():
        inputs.append(data[0].astype(np.float32))
        labels.append(data[1])
    inputs = np.concatenate(inputs).astype(np.float32)
    labels = np.concatenate(labels).astype(np.float32)
    labels_sparse = np.argmax(labels, axis=1)
    labels = np.concatenate(labels).astype(np.int32)
    target_label = np.random.randint(0, 10, size=labels_sparse.shape[0])
    for idx in range(labels_sparse.shape[0]):
        while target_label[idx] == labels_sparse[idx]:
    target_label = np.random.randint(0, 10, size=labels.shape[0])
    for idx in range(labels.shape[0]):
        while target_label[idx] == labels[idx]:
            target_label[idx] = np.random.randint(0, 10)
    target_label = np.eye(10)[target_label].astype(np.float32)
...
...
@@ -167,23 +164,23 @@ def test_black_defense():
    wb_model = ModelToBeAttacked(wb_net)

    # gen white-box adversarial examples of test data
    wb_attack = FastGradientSignMethod(wb_net, eps=0.3)
    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    wb_attack = FastGradientSignMethod(wb_net, eps=0.3, loss_fn=loss)
    wb_adv_sample = wb_attack.generate(attacked_sample,
                                       attacked_true_label)

    wb_raw_preds = softmax(wb_model.predict(wb_adv_sample), axis=1)
    accuracy_test = np.mean(np.equal(np.argmax(wb_model.predict(attacked_sample),
                                               axis=1),
                                     np.argmax(attacked_true_label, axis=1)))
                                     attacked_true_label))
    LOGGER.info(TAG, "prediction accuracy before white-box attack is : %s",
                accuracy_test)
    accuracy_adv = np.mean(np.equal(np.argmax(wb_raw_preds, axis=1),
                                    np.argmax(attacked_true_label, axis=1)))
                                    attacked_true_label))
    LOGGER.info(TAG, "prediction accuracy after white-box attack is : %s",
                accuracy_adv)

    # improve the robustness of model with white-box adversarial examples
    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
    opt = nn.Momentum(wb_net.trainable_params(), 0.01, 0.09)
    nad = NaturalAdversarialDefense(wb_net, loss_fn=loss, optimizer=opt,
...
...
@@ -194,12 +191,12 @@ def test_black_defense():
    wb_def_preds = wb_net(Tensor(wb_adv_sample)).asnumpy()
    wb_def_preds = softmax(wb_def_preds, axis=1)
    accuracy_def = np.mean(np.equal(np.argmax(wb_def_preds, axis=1),
                                    np.argmax(attacked_true_label, axis=1)))
                                    attacked_true_label))
    LOGGER.info(TAG, "prediction accuracy after defense is : %s", accuracy_def)

    # calculate defense evaluation metrics for defense against white-box attack
    wb_def_evaluate = DefenseEvaluate(wb_raw_preds, wb_def_preds,
                                      np.argmax(attacked_true_label, axis=1))
                                      attacked_true_label)
    LOGGER.info(TAG, 'defense evaluation for white-box adversarial attack')
    LOGGER.info(TAG, 'classification accuracy variance (CAV) is : {:.2f}'.format(
...
...
@@ -232,7 +229,7 @@ def test_black_defense():
                       per_bounds=0.1, step_size=0.25, temp=0.1,
                       sparse=False)
    attack_target_label = target_label[:attacked_size]
    true_label = labels_sparse[:attacked_size + benign_size]
    true_label = labels[:attacked_size + benign_size]
    # evaluate robustness of original model
    # gen black-box adversarial examples of test data
    for idx in range(attacked_size):
...
...
@@ -323,4 +320,8 @@ def test_black_defense():
if __name__ == '__main__':
    test_black_defense()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    DEVICE = context.get_context("device_target")
    if DEVICE in ("Ascend", "GPU"):
        test_black_defense()
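test_black_defense draws a random target class per sample and re-draws whenever it collides with the true class. A compact alternative with the same guarantee (a sketch, not the file's code) shifts each true label by a non-zero random offset, which stays uniform over the nine other classes.

import numpy as np

def sample_target_labels(true_labels, num_classes=10, one_hot=True):
    """Pick a random target class per sample that is guaranteed to differ from the truth."""
    true_labels = np.asarray(true_labels)
    # offsets in [1, num_classes-1] can never map a label back onto itself
    offsets = np.random.randint(1, num_classes, size=true_labels.shape[0])
    targets = (true_labels + offsets) % num_classes
    if one_hot:
        return np.eye(num_classes)[targets].astype(np.float32)
    return targets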
example/mnist_demo/mnist_similarity_detector.py
View file @ e585e2b0
...
...
@@ -14,7 +14,6 @@
import sys
import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
...
...
@@ -29,7 +28,6 @@ from mindarmour.attacks.black.pso_attack import PSOAttack
from mindarmour.detectors.black.similarity_detector import SimilarityDetector
from mindarmour.utils.logger import LogUtil

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

sys.path.append("..")
from data_processing import generate_mnist_dataset
...
...
@@ -92,11 +90,6 @@ class EncoderNet(Cell):
        return self._encode_dim


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_similarity_detector():
    """
    Similarity Detector test.
...
...
@@ -178,4 +171,8 @@ def test_similarity_detector():
if __name__ == '__main__':
    test_similarity_detector()
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    DEVICE = context.get_context("device_target")
    if DEVICE in ("Ascend", "GPU"):
        test_similarity_detector()
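The SimilarityDetector exercised in this test flags black-box attacks such as PSOAttack by spotting bursts of near-duplicate queries to the model. As a framework-independent illustration of that idea (not MindArmour's implementation; the function name, window size and threshold are made up for the sketch), one can watch nearest-neighbour distances over fixed-size windows of encoded queries and raise a flag when they collapse.

import numpy as np

def suspicious_query_burst(encoded_queries, window=100, threshold=0.05):
    """Flag windows whose mean nearest-neighbour distance drops below a threshold.

    encoded_queries: (N, D) array of query embeddings; window/threshold are tuning knobs.
    """
    flags = []
    for start in range(0, len(encoded_queries) - window + 1, window):
        chunk = encoded_queries[start:start + window]
        # pairwise L2 distances within the window, ignoring self-distances
        dists = np.linalg.norm(chunk[:, None, :] - chunk[None, :, :], axis=-1)
        np.fill_diagonal(dists, np.inf)
        flags.append(dists.min(axis=1).mean() < threshold)
    return flags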
example/mnist_demo/mnist_train.py
View file @ e585e2b0
...
...
@@ -31,12 +31,6 @@ TAG = "Lenet5_train"
def mnist_train(epoch_size, batch_size, lr, momentum):
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
                        enable_mem_reuse=False)
    lr = lr
    momentum = momentum
    epoch_size = epoch_size
    mnist_path = "./MNIST_unzip/"
    ds = generate_mnist_dataset(os.path.join(mnist_path, "train"),
                                batch_size=batch_size, repeat_size=1)
...
...
@@ -67,4 +61,6 @@ def mnist_train(epoch_size, batch_size, lr, momentum):
if __name__ == '__main__':
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU",
                        enable_mem_reuse=False)
    mnist_train(10, 32, 0.01, 0.9)
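mnist_train takes (epoch_size, batch_size, lr, momentum) positionally, and the entry point above hard-codes both the device and the hyper-parameters. If those were to be made configurable without editing the file, a small wrapper along these lines could sit beside it; the wrapper is hypothetical, and it assumes mnist_train is importable from this script.

import argparse
from mindspore import context
# from mnist_train import mnist_train  # assumes the example is importable as a module

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train LeNet5 on MNIST')
    parser.add_argument('--device', default='CPU', choices=['CPU', 'GPU', 'Ascend'])
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--momentum', type=float, default=0.9)
    args = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device)
    # mnist_train(args.epochs, args.batch_size, args.lr, args.momentum)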