magicwindyyd / mindspore (fork of MindSpore / mindspore)

Commit 9934d066
Authored Sep 07, 2020 by yoni
Parent: eface185

    tod fix compilation issues

Showing 9 changed files with 12 additions and 578 deletions (+12, -578)
mindspore/lite/include/train_session.h  +1 -1
mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc  +1 -1
mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc  +0 -4
mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc  +2 -1
mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc  +1 -1
mindspore/lite/src/scheduler.cc  +0 -1
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc  +3 -1
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc  +0 -561
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc  +4 -7
mindspore/lite/include/train_session.h

@@ -23,7 +23,7 @@
 namespace mindspore {
 namespace lite {
-class Model;
+struct Model;
 }
 namespace lite::tensor {
 class Tensor;
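The change above aligns the forward declaration's class-key with Model's actual definition. If Model is defined with struct, a class forward declaration is still valid C++, but it trips Clang's -Wmismatched-tags (MSVC C4099), which fails builds that treat warnings as errors. A minimal sketch of the pattern, with a hypothetical Widget type standing in for Model:

// mismatched_tags_sketch.cc -- illustrative only; Widget is a hypothetical stand-in.
// The definition uses the struct class-key.
struct Widget {
  int value;
};

// A forward declaration elsewhere should use the same class-key.
// "class Widget;" would still compile, but Clang's -Wmismatched-tags
// (and MSVC's C4099) flag the inconsistency, which fails under -Werror.
struct Widget;

int ReadValue(const Widget &w) { return w.value; }

int main() {
  Widget w{42};
  return ReadValue(w) == 42 ? 0 : 1;
}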
mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc

@@ -66,7 +66,7 @@ int ApplyMomentumCPUKernel::Init() {
   // Only for test with uninitialized Data
   size_t elem_num = in_tensors_[0]->ElementsNum();
   auto accumulate = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  for (int i = 0; i < elem_num; i++) accumulate[i] = 0.0;
+  for (size_t i = 0; i < elem_num; i++) accumulate[i] = 0.0;
   workspace = new float[elem_num];
   return 0;
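Both this fix and the one in sparse_softmax_cross_entropy_with_logits.cc below swap an int loop index for size_t so the index matches its unsigned bound. Comparing a signed int against a size_t triggers -Wsign-compare, which stops the build when warnings are promoted to errors. A minimal standalone sketch of the pattern (the function and names are hypothetical):

// sign_compare_sketch.cc -- illustrative pattern only; Sum and its arguments are hypothetical.
#include <cstddef>

float Sum(const float *data, size_t elem_num) {
  float acc = 0.0f;
  // Declaring the index as "int i" would make "i < elem_num" a signed/unsigned
  // comparison (-Wsign-compare); with -Werror that is a compilation failure.
  for (size_t i = 0; i < elem_num; i++) {
    acc += data[i];
  }
  return acc;
}

int main() {
  const float values[3] = {1.0f, 2.0f, 3.0f};
  return Sum(values, 3) == 6.0f ? 0 : 1;
}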
mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc

@@ -27,10 +27,6 @@ using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 namespace mindspore::kernel {
-namespace {
-constexpr int kArithGradOpInputNum = 3;
-constexpr int kArithGradOpOutputNum = 2;
-}  // namespace
 int ArithmeticGradCPUKernel::Init() {
   auto dx1 = out_tensors_[0];
mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc

@@ -37,9 +37,10 @@ int ConvolutionGradFilterCPUKernel::Init() {
   MS_ASSERT(x_tensor != nullptr);
   auto *dy_tensor = in_tensors_.at(0);
   MS_ASSERT(dy_tensor != nullptr);
+#if 0
   auto *weight_tensor = out_tensors_.at(0);
   MS_ASSERT(weight_tensor != nullptr);
+#endif
   auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
   conv_param->output_batch_ = dy_tensor->shape().at(kNHWC_N);
   conv_param->input_batch_ = x_tensor->shape().at(kNHWC_N);
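The #if 0 / #endif pair added here compiles the weight_tensor lookup out entirely while keeping it visible in the source. One plausible reason, given the commit message, is that the variable had become unused and would fail a -Wunused-variable/-Werror build. A small sketch of the effect (names hypothetical):

// if0_sketch.cc -- illustrative only; the variables are hypothetical.
#include <cstdio>

int main() {
  int batch = 4;
#if 0
  // The preprocessor drops this block before compilation, so an otherwise
  // unused local here can no longer trigger -Wunused-variable under -Werror.
  int weight_rows = 8;
#endif
  printf("batch = %d\n", batch);
  return 0;
}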
mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc

@@ -138,7 +138,7 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() {
   sm_params_.n_dim_ = 2;
   sm_params_.element_size_ = data_size;
   sm_params_.axis_ = 1;
-  for (int i = 0; i < dims.size(); i++) sm_params_.input_shape_[i] = dims[i];
+  for (size_t i = 0; i < dims.size(); i++) sm_params_.input_shape_[i] = dims[i];
   return RET_OK;
 }
mindspore/lite/src/scheduler.cc

@@ -92,7 +92,6 @@ int Scheduler::InferShape(const lite::Model *model, std::vector<tensor::Tensor *
     for (size_t j = 0; j < in_size; ++j) {
       inputs.emplace_back(tensors->at(node->input_indices_[j]));
     }
     auto out_size = node->output_indices_.size();
     for (size_t j = 0; j < out_size; ++j) {
       outputs.emplace_back(tensors->at(node->output_indices_[j]));
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc

@@ -16,6 +16,7 @@
 #include <iostream>
 #include <memory>
 #include <vector>
+#include <algorithm>
 #include "utils/log_adapter.h"
 #include "common/common_test.h"

@@ -295,7 +296,8 @@ TEST_F(TestActGradFp32, hswishGradFp32) {
   printf("single thread running time : %f ms\n", time_avg / 1000.0f);
   printf("==================output data=================\n");
-  for (int i = 0; i < std::min(output_data_size, 20UL); i++) {
+  size_t min = (output_data_size < 20UL) ? output_data_size : 20UL;
+  for (size_t i = 0; i < min; i++) {
     std::cout << output_data[i] << " ,";
   }
   std::cout << std::endl;
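Here the std::min call is replaced by an explicit ternary and the loop index widened to size_t. One likely trigger: std::min's single template parameter cannot be deduced when output_data_size (a size_t) and the literal 20UL are different types on the target toolchain, which is a hard compile error. A hedged sketch of the replacement pattern, with made-up sizes:

// min_mismatch_sketch.cc -- illustrative only; the sizes are made up.
#include <cstddef>
#include <cstdio>

int main() {
  size_t output_data_size = 7;
  // std::min(output_data_size, 20UL) fails template deduction on toolchains
  // where size_t is not unsigned long (e.g. some 32-bit ARM targets).
  // The ternary below avoids the deduction problem and keeps everything size_t.
  size_t limit = (output_data_size < 20UL) ? output_data_size : 20UL;
  for (size_t i = 0; i < limit; i++) {
    printf("%zu ", i);
  }
  printf("\n");
  return 0;
}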
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc

Deleted (file mode 100644 → 0). The diff is collapsed in this view; this file accounts for the 561 deleted lines.
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc

@@ -181,7 +181,6 @@ TEST_F(NetworkTest, tuning_layer) {
   }
   meta_graph->inputIndex = {6, 0};  // XXX TODO why is it reverse?
   meta_graph->outputIndex = {5, 14};
-  const int NUM_OF_OUTPUTS = 2;
   auto input0 = std::make_unique<schema::TensorT>();
   input0->nodeType = schema::NodeType::NodeType_ValueNode;

@@ -452,7 +451,7 @@ int32_t fileIterator(mindspore::session::TrainSession *session, const std::strin
   int32_t res = 0;
   if (auto dir = opendir(path.c_str())) {
     while (auto f = readdir(dir)) {
-      if (!f->d_name || f->d_name[0] == '.') continue;
+      if (f->d_name[0] == '.') continue;
       if (f->d_type == DT_DIR) fileIterator(session, path + f->d_name + "/", cb);
       if (f->d_type == DT_REG)

@@ -462,11 +461,10 @@ int32_t fileIterator(mindspore::session::TrainSession *session, const std::strin
   }
   return res;
 }
-#if 0
 void replaceExt(const std::string &src, std::string *dst) {
-  dst = &std::move(src.substr(0, src.find_last_of('.')) + ".emb");
+  *dst = src.substr(0, src.find_last_of('.')) + ".emb";
 }
-#endif
 int32_t runEffNet(mindspore::session::TrainSession *session, const std::string &in, const std::string &out) {
   // setup input
   auto inputs = session->GetInputs();

@@ -494,7 +492,6 @@ int32_t runEffNet(mindspore::session::TrainSession *session, const std::string &
 }
 TEST_F(NetworkTest, efficient_net) {
-  const int NUM_OF_INPUTS = 1;
   char *buf = nullptr;
   size_t net_size = 0;
   std::string net = "./test_data/nets/efficientnet_b0_f.ms";

@@ -520,7 +517,7 @@ TEST_F(NetworkTest, efficient_net) {
   int32_t res = 0;
   if (in.find(".bin") != std::string::npos) {
     std::string out;
-    replaceExt(in, out);
+    replaceExt(in, &out);
     res = runEffNet(session, in, out);
     std::cout << "input file: " << in << (res ? " Fail" : " Pass") << std::endl;
   }
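The replaceExt change is the classic out-parameter fix: write through the pointer (*dst = ...) instead of reassigning the pointer itself, and pass &out at the call site. A self-contained version of the fixed function, as it appears in the diff:

// replace_ext_sketch.cc -- mirrors the fixed replaceExt from the diff above.
#include <iostream>
#include <string>

// Writing through the pointer updates the caller's string; the old
// "dst = &..." form only reassigned the local pointer, and taking the
// address of a temporary does not even compile.
void replaceExt(const std::string &src, std::string *dst) {
  *dst = src.substr(0, src.find_last_of('.')) + ".emb";
}

int main() {
  std::string out;
  replaceExt("sample.bin", &out);  // call site passes &out, as in the fix
  std::cout << out << std::endl;   // prints "sample.emb"
  return out == "sample.emb" ? 0 : 1;
}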