magicwindyyd / mindspore
Forked from MindSpore / mindspore
Commit 2da8570a
Authored on Apr 02, 2020 by lvliang

pynative-add-lenet

Parent: 9c8a0b7f
Showing 1 changed file with 155 additions and 0 deletions

tests/st/pynative/test_ascend_lenet.py  (new file, mode 100644)  +155 −0
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import time

import pytest
import numpy as np

import mindspore.nn as nn
from mindspore import context, Tensor, ParameterTuple
from mindspore.ops import operations as P
from mindspore.common.initializer import TruncatedNormal
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common import dtype as mstype
from mindspore.nn.wrap.cell_wrapper import WithLossCell
from mindspore.nn.optim import Momentum

np.random.seed(1)


def weight_variable():
    """Weight initializer."""
    return TruncatedNormal(0.02)


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """Conv layer with weight initializer."""
    weight = weight_variable()
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     weight_init=weight, has_bias=False, pad_mode="valid")


def fc_with_initialize(input_channels, out_channels):
    """Fully connected layer with weight and bias initializers."""
    weight = weight_variable()
    bias = weight_variable()
    return nn.Dense(input_channels, out_channels, weight, bias)


class LeNet(nn.Cell):
    """
    LeNet network.

    Args:
        num_class (int): Number of classes. Default: 10.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> LeNet(num_class=10)
    """
    def __init__(self, num_class=10):
        super(LeNet, self).__init__()
        self.num_class = num_class
        self.batch_size = 32
        self.conv1 = conv(1, 6, 5)
        self.conv2 = conv(6, 16, 5)
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, self.num_class)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.reshape = P.Reshape()

    def construct(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.reshape(x, (self.batch_size, -1))
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x


class CrossEntropyLoss(nn.Cell):
    """
    Define loss for network.
    """
    def __init__(self):
        super(CrossEntropyLoss, self).__init__()
        self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
        self.mean = P.ReduceMean()
        self.one_hot = P.OneHot()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.num = Tensor(32.0, mstype.float32)

    def construct(self, logits, label):
        label = self.one_hot(label, F.shape(logits)[1], self.on_value, self.off_value)
        loss = self.cross_entropy(logits, label)[0]
        loss = P.RealDiv()(P.ReduceSum()(loss, -1), self.num)
        return loss


class GradWrap(nn.Cell):
    """
    GradWrap definition.
    """
    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network
        self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters()))

    def construct(self, x, label):
        weights = self.weights
        return C.grad_by_list(self.network, weights)(x, label)


@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_ascend_pynative_lenet():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

    epoch_size = 20
    batch_size = 32
    inputs = Tensor(np.ones([batch_size, 1, 32, 32]).astype(np.float32))
    labels = Tensor(np.ones([batch_size]).astype(np.int32))
    net = LeNet()
    criterion = CrossEntropyLoss()
    optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)

    net_with_criterion = WithLossCell(net, criterion)
    train_network = GradWrap(net_with_criterion)
    train_network.set_train()
    total_time = 0
    for epoch in range(0, epoch_size):
        start_time = time.time()
        # In PyNative mode each step executes eagerly: forward pass, loss,
        # gradient computation, and optimizer update are separate calls.
        fw_output = net(inputs)
        loss_output = criterion(fw_output, labels)
        grads = train_network(inputs, labels)
        success = optimizer(grads)
        end_time = time.time()
        cost_time = end_time - start_time
        total_time = total_time + cost_time
    assert total_time < 20.0
    assert loss_output.asnumpy() < 0.01
\ No newline at end of file
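To run this ST case you would typically need an Ascend environment; invoking it through pytest (for example, `pytest tests/st/pynative/test_ascend_lenet.py`) should select it, assuming the custom markers (`level0`, `platform_x86_ascend_training`, `env_single`) are registered in the repo's pytest configuration.

As a sanity reference for the custom CrossEntropyLoss cell above: it one-hot encodes the labels, takes the per-sample softmax cross-entropy (the first output of P.SoftmaxCrossEntropyWithLogits), then averages over the fixed batch of 32 via ReduceSum and RealDiv. Below is a minimal NumPy sketch of that computation; the name reference_loss is hypothetical and for illustration only, not part of the commit.

import numpy as np

def reference_loss(logits, labels, batch=32.0):
    """Hypothetical NumPy re-statement of the CrossEntropyLoss cell above."""
    # One-hot encode integer labels, as P.OneHot does with on/off values 1.0/0.0
    onehot = np.eye(logits.shape[1])[labels]
    # Numerically stable log-softmax over the class axis
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # Per-sample cross-entropy, then sum over the batch and divide by 32.0,
    # mirroring ReduceSum followed by RealDiv in the cell
    per_sample = -(onehot * log_softmax).sum(axis=1)
    return per_sample.sum() / batch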