magicwindyyd / mindspore (forked from MindSpore / mindspore)

Commit 3e7ba14e
Authored on Aug 05, 2020 by mindspore-ci-bot; committed by Gitee on Aug 05, 2020

!3828 add minddata lite

Merge pull request !3828 from 章一智/md-lite
Parents: 11b3c911, 19b22166
Showing 27 changed files with 784 additions and 8 deletions (+784 / −8).
.gitmodules  +10 −0
build.sh  +46 −0
mindspore/ccsrc/minddata/dataset/api/de_tensor.cc  +198 −0
mindspore/ccsrc/minddata/dataset/api/execute.cc  +54 −0
mindspore/ccsrc/minddata/dataset/core/client.h  +3 −0
mindspore/ccsrc/minddata/dataset/core/tensor.cc  +4 −0
mindspore/ccsrc/minddata/dataset/core/tensor.h  +14 −0
mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc  +6 −0
mindspore/ccsrc/minddata/dataset/engine/opt/pass.h  +4 −0
mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc  +11 −0
mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.h  +5 −0
mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.cc  +2 −1
mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h  +2 −0
mindspore/ccsrc/minddata/dataset/include/de_tensor.h  +75 −0
mindspore/ccsrc/minddata/dataset/include/execute.h  +51 −0
mindspore/ccsrc/minddata/dataset/include/tensor.h  +21 −6
mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc  +0 −1
mindspore/lite/CMakeLists.txt  +26 −0
mindspore/lite/minddata/CMakeLists.txt  +47 −0
mindspore/lite/src/CMakeLists.txt  +7 −0
mindspore/lite/test/CMakeLists.txt  +19 −0
mindspore/lite/test/run_test.sh  +6 −0
mindspore/lite/test/ut/src/dataset/de_tensor_test.cc  +98 −0
mindspore/lite/test/ut/src/dataset/eager_test.cc  +72 −0
third_party/eigen  +1 −0
third_party/libjpeg-turbo  +1 −0
third_party/opencv  +1 −0
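Taken together, these changes add an eager-mode data pipeline to MindSpore Lite: DETensor (de_tensor.h/.cc) wraps a dataset::Tensor behind the lite MSTensor interface, and Execute (execute.h/.cc) runs a single dataset TensorOperation on such a tensor. A minimal usage sketch of how the pieces compose follows; it mirrors the flow exercised by eager_test.cc later in this diff, and the transform names and namespaces (Decode, Resize under api::vision in transforms.h) are assumptions taken from that test rather than spelled out by this summary.

// Sketch only: assumes transforms.h declares the vision transforms used in eager_test.cc.
#include <memory>
#include <string>
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/transforms.h"

std::shared_ptr<mindspore::tensor::MSTensor> LoadAndResize(const std::string &image_path) {
  using mindspore::dataset::api::Execute;
  using mindspore::dataset::api::vision::Decode;
  using mindspore::dataset::api::vision::Resize;
  // Read the file contents into a DETensor, which implements the lite MSTensor interface.
  std::shared_ptr<mindspore::tensor::MSTensor> image(
      mindspore::tensor::DETensor::CreateTensor(image_path));
  image = Execute(Decode())(image);            // decode the raw bytes into an image tensor
  image = Execute(Resize({224, 224}))(image);  // each Execute wraps exactly one TensorOperation
  return image;                                // nullptr if any step failed
}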
.gitmodules
...
...
@@ -24,3 +24,13 @@
[submodule "third_party/OpenCL-Headers"]
path = third_party/OpenCL-Headers
url = https://github.com/KhronosGroup/OpenCL-Headers.git
[submodule "third_party/opencv"]
path = third_party/opencv
url = https://github.com/opencv/opencv.git
[submodule "third_party/eigen"]
path = third_party/eigen
url = https://gitlab.com/libeigen/eigen.git
[submodule "third_party/libjpeg-turbo"]
path = third_party/libjpeg-turbo
url = https://github.com/libjpeg-turbo/libjpeg-turbo.git
ignore = dirty
build.sh
@@ -519,6 +519,50 @@ build_opencl() {
    fi
}

build_opencv() {
    cd ${BASEPATH}
    if [[ "${INC_BUILD}" == "off" ]]; then
        git submodule update --init --recursive third_party/opencv
        cd ${BASEPATH}/third_party/opencv
        rm -rf build && mkdir -p build && cd build && cmake ${CMAKE_MINDDATA_ARGS} -DBUILD_SHARED_LIBS=ON -DBUILD_ANDROID_PROJECTS=OFF \
            -DBUILD_LIST=core,imgcodecs,imgproc -DBUILD_ZLIB=ON .. && make -j$THREAD_NUM
    fi
}

build_jpeg_turbo() {
    cd ${BASEPATH}
    if [[ "${INC_BUILD}" == "off" ]]; then
        git submodule update --init --recursive third_party/libjpeg-turbo
        cd ${BASEPATH}/third_party/libjpeg-turbo
        rm -rf build && mkdir -p build && cd build && cmake ${CMAKE_MINDDATA_ARGS} -DCMAKE_BUILD_TYPE=Release \
            -DCMAKE_INSTALL_PREFIX="${BASEPATH}/third_party/libjpeg-turbo" .. && make -j$THREAD_NUM && make install
    fi
}

build_eigen() {
    cd ${BASEPATH}
    git submodule update --init --recursive third_party/eigen
}

build_minddata_lite_deps() {
    echo "start build minddata lite project"
    if [[ "${LITE_PLATFORM}" == "arm64" ]]; then
        CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DANDROID_NATIVE_API_LEVEL=19 \
            -DANDROID_NDK=${ANDROID_NDK} -DANDROID_ABI=arm64-v8a -DANDROID_TOOLCHAIN_NAME=aarch64-linux-android-clang \
            -DANDROID_STL=c++_shared -DCMAKE_BUILD_TYPE=${BUILD_TYPE}"
    elif [[ "${LITE_PLATFORM}" == "arm32" ]]; then
        CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DANDROID_NATIVE_API_LEVEL=19 \
            -DANDROID_NDK=${ANDROID_NDK} -DANDROID_ABI=armeabi-v7a -DANDROID_TOOLCHAIN_NAME=clang \
            -DANDROID_STL=c++_shared -DCMAKE_BUILD_TYPE=${BUILD_TYPE}"
    else
        CMAKE_MINDDATA_ARGS="-DCMAKE_BUILD_TYPE=${BUILD_TYPE}"
    fi
    build_opencv
    build_eigen
    build_jpeg_turbo
}

build_lite() {
    echo "start build mindspore lite project"
...
@@ -533,6 +577,8 @@ build_lite()
    build_flatbuffer
    build_gtest
    build_minddata_lite_deps
    cd "${BASEPATH}/mindspore/lite"
    if [[ "${INC_BUILD}" == "off" ]]; then
        rm -rf build
...
mindspore/ccsrc/minddata/dataset/api/de_tensor.cc
new file (mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/core/constants.h"
#include "minddata/dataset/core/data_type.h"
#include "mindspore/core/ir/dtype/type_id.h"
#include "utils/hashing.h"
#include "mindspore/lite/src/ir/tensor.h"
namespace mindspore {
namespace tensor {
dataset::DataType MSTypeToDEType(TypeId data_type) {
  switch (data_type) {
    case kNumberTypeBool:
      return dataset::DataType(dataset::DataType::DE_BOOL);
    case kNumberTypeInt8:
      return dataset::DataType(dataset::DataType::DE_INT8);
    case kNumberTypeUInt8:
      return dataset::DataType(dataset::DataType::DE_UINT8);
    case kNumberTypeInt16:
      return dataset::DataType(dataset::DataType::DE_INT16);
    case kNumberTypeUInt16:
      return dataset::DataType(dataset::DataType::DE_UINT16);
    case kNumberTypeInt32:
      return dataset::DataType(dataset::DataType::DE_INT32);
    case kNumberTypeUInt32:
      return dataset::DataType(dataset::DataType::DE_UINT32);
    case kNumberTypeInt64:
      return dataset::DataType(dataset::DataType::DE_INT64);
    case kNumberTypeUInt64:
      return dataset::DataType(dataset::DataType::DE_UINT64);
    case kNumberTypeFloat16:
      return dataset::DataType(dataset::DataType::DE_FLOAT16);
    case kNumberTypeFloat32:
      return dataset::DataType(dataset::DataType::DE_FLOAT32);
    case kNumberTypeFloat64:
      return dataset::DataType(dataset::DataType::DE_FLOAT64);
    default:
      return dataset::DataType(dataset::DataType::DE_UNKNOWN);
  }
}

TypeId DETypeToMSType(dataset::DataType data_type) {
  switch (data_type.value()) {
    case dataset::DataType::DE_BOOL:
      return mindspore::TypeId::kNumberTypeBool;
    case dataset::DataType::DE_INT8:
      return mindspore::TypeId::kNumberTypeInt8;
    case dataset::DataType::DE_UINT8:
      return mindspore::TypeId::kNumberTypeUInt8;
    case dataset::DataType::DE_INT16:
      return mindspore::TypeId::kNumberTypeInt16;
    case dataset::DataType::DE_UINT16:
      return mindspore::TypeId::kNumberTypeUInt16;
    case dataset::DataType::DE_INT32:
      return mindspore::TypeId::kNumberTypeInt32;
    case dataset::DataType::DE_UINT32:
      return mindspore::TypeId::kNumberTypeUInt32;
    case dataset::DataType::DE_INT64:
      return mindspore::TypeId::kNumberTypeInt64;
    case dataset::DataType::DE_UINT64:
      return mindspore::TypeId::kNumberTypeUInt64;
    case dataset::DataType::DE_FLOAT16:
      return mindspore::TypeId::kNumberTypeFloat16;
    case dataset::DataType::DE_FLOAT32:
      return mindspore::TypeId::kNumberTypeFloat32;
    case dataset::DataType::DE_FLOAT64:
      return mindspore::TypeId::kNumberTypeFloat64;
    default:
      return kTypeUnknown;
  }
}

MSTensor *DETensor::CreateTensor(TypeId data_type, const std::vector<int> &shape) {
  return new DETensor(data_type, shape);
}

MSTensor *DETensor::CreateTensor(const std::string &path) {
  std::shared_ptr<dataset::Tensor> t;
  (void)dataset::Tensor::CreateFromFile(path, &t);
  return new DETensor(std::move(t));
}

DETensor::DETensor(TypeId data_type, const std::vector<int> &shape) {
  std::vector<dataset::dsize_t> t_shape;
  t_shape.reserve(shape.size());
  std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape),
                 [](int s) -> dataset::dsize_t { return static_cast<dataset::dsize_t>(s); });
  dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), MSTypeToDEType(data_type), &this->tensor_impl_);
}

DETensor::DETensor(std::shared_ptr<dataset::Tensor> tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); }

MSTensor *DETensor::ConvertToLiteTensor() {
  // static MSTensor::CreateTensor is only for the LiteTensor
  MSTensor *tensor = MSTensor::CreateTensor(this->data_type(), this->shape());
  MS_ASSERT(tensor->Size() == this->Size());
  memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size());
  return tensor;
}

std::shared_ptr<dataset::Tensor> DETensor::tensor() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_;
}

TypeId DETensor::data_type() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return DETypeToMSType(this->tensor_impl_->type());
}

TypeId DETensor::set_data_type(TypeId data_type) {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  if (data_type != this->data_type()) {
    std::shared_ptr<dataset::Tensor> temp;
    dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type),
                                      this->tensor_impl_->GetBuffer(), &temp);
    this->tensor_impl_ = temp;
  }
  return data_type;
}

std::vector<int> DETensor::shape() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  std::vector<dataset::dsize_t> t_shape = this->tensor_impl_->shape().AsVector();
  std::vector<int> shape;
  shape.reserve(t_shape.size());
  std::transform(t_shape.begin(), t_shape.end(), std::back_inserter(shape),
                 [](dataset::dsize_t s) -> int { return static_cast<int>(s); });
  return shape;
}

size_t DETensor::set_shape(const std::vector<int> &shape) {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  std::vector<dataset::dsize_t> t_shape;
  t_shape.reserve(shape.size());
  std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape),
                 [](int s) -> dataset::dsize_t { return static_cast<dataset::dsize_t>(s); });
  dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape));
  return shape.size();
}

int DETensor::DimensionSize(size_t index) const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  int dim_size = -1;
  auto shape = this->shape();
  if (index < shape.size()) {
    dim_size = shape[index];
  } else {
    MS_LOG(ERROR) << "Dimension index is wrong: " << index;
  }
  return dim_size;
}

int DETensor::ElementsNum() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_->Size();
}

std::size_t DETensor::hash() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  auto shape = this->shape();
  std::size_t hash_value = std::hash<int>{}(SizeToInt(this->data_type()));
  hash_value = hash_combine(hash_value, std::hash<size_t>{}(shape.size()));
  // hashing all elements may be costly, so only take at most 4 elements into account based on some experiments
  for (size_t i = 0; (i < shape.size()) && (i < 4); ++i) {
    hash_value = hash_combine(hash_value, (std::hash<int>{}(shape[i])));
  }
  return hash_value;
}

size_t DETensor::Size() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_->SizeInBytes();
}

void *DETensor::MutableData() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_->GetMutableBuffer();
}
}  // namespace tensor
}  // namespace mindspore
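MSTypeToDEType and DETypeToMSType above are intended as inverse mappings over the numeric types that both the lite TypeId enum and the dataset DataType enum support. A small sanity check of that round trip, written in the style of the gtest cases added later in this commit, could look like the hedged sketch below (the test name and the reuse of the MindDataTestTensorDE fixture are hypothetical, not part of this diff).

// Hypothetical round-trip check: every TypeId handled by MSTypeToDEType
// should map back to itself through DETypeToMSType.
TEST_F(MindDataTestTensorDE, TypeIdRoundTrip) {
  using mindspore::tensor::DETypeToMSType;
  using mindspore::tensor::MSTypeToDEType;
  mindspore::TypeId ids[] = {mindspore::kNumberTypeUInt8, mindspore::kNumberTypeInt32,
                             mindspore::kNumberTypeFloat32, mindspore::kNumberTypeFloat64};
  for (auto id : ids) {
    EXPECT_EQ(DETypeToMSType(MSTypeToDEType(id)), id);
  }
}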
mindspore/ccsrc/minddata/dataset/api/execute.cc
new file (mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/include/tensor.h"
#include "minddata/dataset/kernels/tensor_op.h"
namespace mindspore {
namespace dataset {
namespace api {

Execute::Execute(std::shared_ptr<TensorOperation> op) : op_(std::move(op)) {}

std::shared_ptr<tensor::MSTensor> Execute::operator()(std::shared_ptr<tensor::MSTensor> input) {
  // Build the op
  if (op_ == nullptr) {
    MS_LOG(ERROR) << "Input TensorOperation is not valid";
    return nullptr;
  }
  std::shared_ptr<Tensor> de_input = std::dynamic_pointer_cast<tensor::DETensor>(input)->tensor();
  if (de_input == nullptr) {
    MS_LOG(ERROR) << "Input Tensor is not valid";
    return nullptr;
  }
  std::shared_ptr<TensorOp> transform = op_->Build();
  std::shared_ptr<Tensor> de_output;
  Status rc = transform->Compute(de_input, &de_output);
  if (rc.IsError()) {
    // execution failed
    MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString();
    return nullptr;
  }
  return std::make_shared<tensor::DETensor>(std::move(de_output));
}
}  // namespace api
}  // namespace dataset
}  // namespace mindspore
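Because Execute::operator() returns nullptr both when the wrapped op is invalid and when Compute fails, callers are expected to check every eager step before chaining the next one. A short hedged sketch of that calling pattern (Decode is the transform used in eager_test.cc; the helper name here is hypothetical):

// Sketch: a failed transform surfaces as a null MSTensor, not an exception.
std::shared_ptr<mindspore::tensor::MSTensor> DecodeChecked(
    std::shared_ptr<mindspore::tensor::MSTensor> input) {
  auto output = mindspore::dataset::api::Execute(mindspore::dataset::api::vision::Decode())(input);
  if (output == nullptr) {
    MS_LOG(ERROR) << "Decode failed";
    return nullptr;
  }
  return output;
}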
mindspore/ccsrc/minddata/dataset/core/client.h
...
...
@@ -25,8 +25,11 @@
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/engine/data_schema.h"
#include "minddata/dataset/engine/dataset_iterator.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/mindrecord_op.h"
#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
#endif
#ifdef ENABLE_PYTHON
#include "minddata/dataset/engine/datasetops/barrier_op.h"
...
...
mindspore/ccsrc/minddata/dataset/core/tensor.cc
...
...
@@ -213,6 +213,7 @@ Status Tensor::CreateFromNpArray(const py::array &arr, std::shared_ptr<Tensor> *
}
#endif
#ifndef ENABLE_ANDROID
Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, TensorPtr *out) {
  const TensorAlloc *alloc = GlobalContext::Instance()->tensor_allocator();
  *out = std::allocate_shared<Tensor>(*alloc, TensorShape({static_cast<dsize_t>(bytes_list.value_size())}),
...
@@ -255,6 +256,7 @@ Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const
  (*out)->Reshape(shape);
  return Status::OK();
}
#endif

Status Tensor::CreateFromFile(const std::string &path, std::shared_ptr<Tensor> *out) {
  std::ifstream fs;
...
@@ -269,6 +271,7 @@ Status Tensor::CreateFromFile(const std::string &path, std::shared_ptr<Tensor> *
  return Status::OK();
}
#ifndef ENABLE_ANDROID
Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape,
                                  const DataType &type, dsize_t pad_size, TensorPtr *out) {
  RETURN_IF_NOT_OK(Tensor::CreateEmpty(shape, type, out));
...
@@ -298,6 +301,7 @@ Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const
  return Status::OK();
}
#endif
// Memcpy the given strided array's used part to consecutive memory
// Consider a 3-d array
...
...
mindspore/ccsrc/minddata/dataset/core/tensor.h
...
...
@@ -38,12 +38,21 @@
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/util/status.h"
#ifndef ENABLE_ANDROID
#include "proto/example.pb.h"
#else
#include "minddata/dataset/include/de_tensor.h"
#endif
#ifdef ENABLE_PYTHON
namespace py = pybind11;
#endif

namespace mindspore {
#ifdef ENABLE_ANDROID
namespace tensor {
class DETensor;
}  // namespace tensor
#endif

namespace dataset {
class Tensor;
template <typename T>
...
@@ -117,6 +126,7 @@ class Tensor {
  static Status CreateFromNpArray(const py::array &arr, TensorPtr *out);
#endif

#ifndef ENABLE_ANDROID
  /// Create a tensor of type DE_STRING from a BytesList.
  /// \param[in] bytes_list protobuf's Bytelist
  /// \param[in] shape shape of the output tensor
...
@@ -134,6 +144,7 @@ class Tensor {
  /// \return Status Code
  static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape,
                                   const DataType &type, dsize_t pad_size, TensorPtr *out);
#endif

  /// Create a Tensor from a given list of values.
  /// \tparam type of the values to be inserted.
...
@@ -649,6 +660,9 @@ class Tensor {
  unsigned char *data_end_ = nullptr;

 private:
#ifdef ENABLE_ANDROID
  friend class tensor::DETensor;
#endif
/// Copy raw data of a array based on shape and strides to the destination pointer
/// \param dst [out] Pointer to the destination array where the content is to be copied
/// \param[in] src Pointer to the source of strided array to be copied
...
...
mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc
...
...
@@ -34,10 +34,14 @@
#include "minddata/dataset/engine/datasetops/source/cifar_op.h"
#include "minddata/dataset/engine/datasetops/source/coco_op.h"
#include "minddata/dataset/engine/datasetops/source/manifest_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/mindrecord_op.h"
#endif
#include "minddata/dataset/engine/datasetops/source/mnist_op.h"
#include "minddata/dataset/engine/datasetops/source/random_data_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
#endif
#include "minddata/dataset/engine/datasetops/source/voc_op.h"
#ifdef ENABLE_PYTHON
#include "minddata/dataset/engine/datasetops/filter_op.h"
...
...
@@ -136,6 +140,7 @@ Status NodePass::RunOnNode(std::shared_ptr<ShuffleOp> node, bool *modified) {
  return RunOnNode(std::static_pointer_cast<DatasetOp>(node), modified);
}

#ifndef ENABLE_ANDROID
Status NodePass::RunOnNode(std::shared_ptr<MindRecordOp> node, bool *modified) {
  // Fallback to base class visitor by default
  return RunOnNode(std::static_pointer_cast<DatasetOp>(node), modified);
...
@@ -145,6 +150,7 @@ Status NodePass::RunOnNode(std::shared_ptr<TFReaderOp> node, bool *modified) {
  // Fallback to base class visitor by default
  return RunOnNode(std::static_pointer_cast<DatasetOp>(node), modified);
}
#endif

#ifdef ENABLE_PYTHON
Status NodePass::RunOnNode(std::shared_ptr<FilterOp> node, bool *modified) {
...
...
mindspore/ccsrc/minddata/dataset/engine/opt/pass.h
...
...
@@ -37,9 +37,11 @@ class SkipOp;
class ShuffleOp;
#ifndef ENABLE_ANDROID
class MindRecordOp;
class TFReaderOp;
#endif
#ifdef ENABLE_PYTHON
class FilterOp;
...
@@ -158,9 +160,11 @@ class NodePass : public Pass {
  virtual Status RunOnNode(std::shared_ptr<ShuffleOp> node, bool *modified);
#ifndef ENABLE_ANDROID
  virtual Status RunOnNode(std::shared_ptr<MindRecordOp> node, bool *modified);
  virtual Status RunOnNode(std::shared_ptr<TFReaderOp> node, bool *modified);
#endif
#ifdef ENABLE_PYTHON
  virtual Status RunOnNode(std::shared_ptr<FilterOp> node, bool *modified);
...
...
mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc
...
...
@@ -25,10 +25,17 @@
#include "minddata/dataset/engine/datasetops/source/cifar_op.h"
#include "minddata/dataset/engine/datasetops/source/coco_op.h"
#include "minddata/dataset/engine/datasetops/source/image_folder_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/mindrecord_op.h"
#endif
#include "minddata/dataset/engine/datasetops/source/mnist_op.h"
#include "minddata/dataset/engine/datasetops/source/random_data_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
#endif
#ifdef ENABLE_PYTHON
#include "minddata/dataset/engine/datasetops/source/generator_op.h"
...
...
@@ -123,6 +130,7 @@ Status CacheTransformPass::CachePass::NonMappableCacheLeafSetup(std::shared_ptr<
  return Status::OK();
}

#ifndef ENABLE_ANDROID
// Perform leaf node cache transform identification
Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr<TFReaderOp> node, bool *modified) {
  if (is_caching_) {
...
@@ -132,6 +140,7 @@ Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr<TFReaderOp> node
  }
  return NonMappableCacheLeafSetup(std::static_pointer_cast<DatasetOp>(node));
}
#endif

// Perform leaf node cache transform identification
Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr<RandomDataOp> node, bool *modified) {
...
@@ -163,10 +172,12 @@ Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr<CelebAOp> node,
  return MappableCacheLeafSetup(std::static_pointer_cast<DatasetOp>(node));
}

#ifndef ENABLE_ANDROID
// Perform leaf node cache transform identification
Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr<MindRecordOp> node, bool *modified) {
  return MappableCacheLeafSetup(std::static_pointer_cast<DatasetOp>(node));
}
#endif

#ifdef ENABLE_PYTHON
// Perform leaf node cache transform identification
...
...
mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.h
...
...
@@ -58,11 +58,14 @@ class CacheTransformPass : public TreePass {
/// \return Status The error code return
  Status RunOnNode(std::shared_ptr<CacheOp> node, bool *modified) override;

#ifndef ENABLE_ANDROID
  /// \brief Perform leaf node cache transform identifications
  /// \param[in] node The node being visited
  /// \param[inout] modified Indicator if the node was changed at all
  /// \return Status The error code return
  Status RunOnNode(std::shared_ptr<TFReaderOp> node, bool *modified) override;
#endif

  /// \brief Perform leaf node cache transform identifications
  /// \param[in] node The node being visited
...
@@ -120,11 +123,13 @@ class CacheTransformPass : public TreePass {
  /// \return Status The error code return
  Status RunOnNode(std::shared_ptr<CelebAOp> node, bool *modified) override;

#ifndef ENABLE_ANDROID
  /// \brief Perform leaf node cache transform identifications
  /// \param[in] node The node being visited
  /// \param[inout] modified Indicator if the node was changed at all
  /// \return Status The error code return
  Status RunOnNode(std::shared_ptr<MindRecordOp> node, bool *modified) override;
#endif

  /// \brief Getter
  std::vector<std::pair<std::shared_ptr<DatasetOp>, std::shared_ptr<CacheOp>>> cache_pairs() { return cache_pairs_; }
...
...
mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.cc
...
...
@@ -60,7 +60,7 @@ Status PrinterPass::RunOnNode(std::shared_ptr<ShuffleOp> node, bool *modified) {
  std::cout << "Visiting ShuffleOp" << '\n';
  return Status::OK();
}

#ifndef ENABLE_ANDROID
Status PrinterPass::RunOnNode(std::shared_ptr<MindRecordOp> node, bool *modified) {
  *modified = false;
  std::cout << "Visiting MindRecordOp" << '\n';
...
@@ -72,6 +72,7 @@ Status PrinterPass::RunOnNode(std::shared_ptr<TFReaderOp> node, bool *modified)
  std::cout << "Visiting TFReaderOp" << '\n';
  return Status::OK();
}
#endif

#ifdef ENABLE_PYTHON
Status PrinterPass::RunOnNode(std::shared_ptr<FilterOp> node, bool *modified) {
...
...
mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h
...
...
@@ -39,9 +39,11 @@ class PrinterPass : public NodePass {
  Status RunOnNode(std::shared_ptr<ShuffleOp> node, bool *modified) override;
#ifndef ENABLE_ANDROID
  Status RunOnNode(std::shared_ptr<MindRecordOp> node, bool *modified) override;
  Status RunOnNode(std::shared_ptr<TFReaderOp> node, bool *modified) override;
#endif
#ifdef ENABLE_PYTHON
  Status RunOnNode(std::shared_ptr<FilterOp> node, bool *modified) override;
...
...
mindspore/ccsrc/minddata/dataset/include/de_tensor.h
new file (mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_
#include <string>
#include <vector>
#include <memory>
#include "include/ms_tensor.h"
#include "minddata/dataset/include/tensor.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace tensor {
class DETensor : public MSTensor {
 public:
  /// \brief Create a MSTensor pointer.
  /// \param[data_type] DataTypeId of tensor to be created.
  /// \param[shape] Shape of tensor to be created.
  /// \return - MSTensor pointer.
  static MSTensor *CreateTensor(TypeId data_type, const std::vector<int> &shape);

  /// \brief Create a MSTensor pointer.
  /// \param[path] Path file to be read.
  /// \return - MSTensor pointer.
  static MSTensor *CreateTensor(const std::string &path);

  DETensor(TypeId data_type, const std::vector<int> &shape);

  explicit DETensor(std::shared_ptr<dataset::Tensor> tensor_ptr);

  ~DETensor() = default;

  /// \brief Create a duplicate instance, convert the DETensor to the LiteTensor.
  /// \return - MSTensor pointer.
  MSTensor *ConvertToLiteTensor();

  std::shared_ptr<dataset::Tensor> tensor() const;

  TypeId data_type() const override;

  TypeId set_data_type(const TypeId data_type) override;

  std::vector<int> shape() const override;

  size_t set_shape(const std::vector<int> &shape) override;

  int DimensionSize(size_t index) const override;

  int ElementsNum() const override;

  std::size_t hash() const override;

  size_t Size() const override;

  void *MutableData() const override;

 protected:
  std::shared_ptr<dataset::Tensor> tensor_impl_;
};
}  // namespace tensor
}  // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_
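ConvertToLiteTensor, declared above and covered by the MSTensorConvertToLiteTensor unit test later in this diff, copies the DETensor buffer into a tensor owned by the lite runtime, so the result no longer shares memory with the dataset-side object. A minimal hedged sketch of handing preprocessed data over (the wrapper function name is hypothetical):

// Sketch only: hands a preprocessed DETensor to the lite side as an independently owned copy.
std::shared_ptr<mindspore::tensor::MSTensor> ToLiteOwned(mindspore::tensor::DETensor *de_tensor) {
  // ConvertToLiteTensor allocates a lite tensor of the same type/shape and memcpy's the data,
  // so releasing the DETensor afterwards does not invalidate the returned tensor.
  return std::shared_ptr<mindspore::tensor::MSTensor>(de_tensor->ConvertToLiteTensor());
}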
mindspore/ccsrc/minddata/dataset/include/execute.h
new file (mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef DATASET_API_EXECUTE_H_
#define DATASET_API_EXECUTE_H_
#include <vector>
#include <memory>
#include "minddata/dataset/core/constants.h"
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/include/transforms.h"
namespace mindspore {
namespace dataset {
class TensorOp;

namespace api {
// class to run tensor operations in eager mode
class Execute {
 public:
  /// \brief Constructor
  explicit Execute(std::shared_ptr<TensorOperation> op);

  /// \brief callable function to execute the TensorOperation in eager mode
  /// \param[inout] input - the tensor to be transformed
  /// \return - the output tensor, nullptr if Compute fails
  std::shared_ptr<tensor::MSTensor> operator()(std::shared_ptr<tensor::MSTensor> input);

 private:
  std::shared_ptr<TensorOperation> op_;
};
}  // namespace api
}  // namespace dataset
}  // namespace mindspore
#endif // DATASET_API_EXECUTE_H_
mindspore/ccsrc/minddata/dataset/include/tensor.h
...
...
@@ -38,12 +38,21 @@
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/util/status.h"
#ifndef ENABLE_ANDROID
#include "proto/example.pb.h"
#else
#include "minddata/dataset/include/de_tensor.h"
#endif
#ifdef ENABLE_PYTHON
namespace py = pybind11;
#endif

namespace mindspore {
#ifdef ENABLE_ANDROID
namespace tensor {
class DETensor;
}  // namespace tensor
#endif

namespace dataset {
class Tensor;
template <typename T>
...
@@ -117,6 +126,7 @@ class Tensor {
  static Status CreateFromNpArray(const py::array &arr, TensorPtr *out);
#endif

#ifndef ENABLE_ANDROID
  /// Create a tensor of type DE_STRING from a BytesList.
  /// \param[in] bytes_list protobuf's Bytelist
  /// \param[in] shape shape of the output tensor
...
@@ -134,6 +144,7 @@ class Tensor {
  /// \return Status Code
  static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape,
                                   const DataType &type, dsize_t pad_size, TensorPtr *out);
#endif

  /// Create a Tensor from a given list of values.
  /// \tparam type of the values to be inserted.
...
@@ -649,12 +660,8 @@ class Tensor {
  unsigned char *data_end_ = nullptr;

 private:
#ifdef ENABLE_PYTHON
  /// Helper function to create a tensor from Numpy array of strings
  /// \param[in] arr Numpy array
  /// \param[out] out Created Tensor
  /// \return Status
  static Status CreateFromNpString(py::array arr, TensorPtr *out);
#ifdef ENABLE_ANDROID
  friend class tensor::DETensor;
#endif
  /// Copy raw data of a array based on shape and strides to the destination pointer
  /// \param dst [out] Pointer to the destination array where the content is to be copied
...
@@ -668,6 +675,14 @@ class Tensor {
  /// const of the size of the offset variable
  static constexpr uint8_t kOffsetSize = sizeof(offset_t);

#ifdef ENABLE_PYTHON
  /// Helper function to create a tensor from Numpy array of strings
  /// \param[in] arr Numpy array
  /// \param[out] out Created Tensor
  /// \return Status
  static Status CreateFromNpString(py::array arr, TensorPtr *out);
#endif
};

template <>
inline Tensor::TensorIterator<std::string_view> Tensor::end<std::string_view>() {
...
...
mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc
...
...
@@ -20,7 +20,6 @@
#include "minddata/dataset/kernels/image/resize_op.h"
#include "minddata/dataset/kernels/image/image_utils.h"
#include "minddata/dataset/core/cv_tensor.h"
#include "minddata/dataset/core/pybind_support.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
...
...
mindspore/lite/CMakeLists.txt
...
...
@@ -33,6 +33,7 @@ option(BUILD_CONVERTER "if build converter" on)
option(ENABLE_FP16 "if build fp16 ops" off)
option(SUPPORT_GPU "if support gpu" off)
option(OFFLINE_COMPILE "if offline compile OpenCL kernel" off)
option(BUILD_MINDDATA "" on)

if (BUILD_DEVICE)
    add_compile_definitions(BUILD_DEVICE)
...
@@ -116,6 +117,31 @@ if (BUILD_DEVICE)
            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8.2-a+dotprod+fp16")
        endif()
    endif()
endif()

if (BUILD_MINDDATA)
    # opencv
    set(OpenCV_DIR ${TOP_DIR}/third_party/opencv/build)
    find_package(OpenCV REQUIRED)
    include_directories(${OpenCV_INCLUDE_DIRS})
    # eigen
    include_directories(${TOP_DIR}/third_party/eigen/)
    # jpeg-turbo
    add_library(jpeg-turbo SHARED IMPORTED)
    set_target_properties(jpeg-turbo PROPERTIES IMPORTED_LOCATION
            ${TOP_DIR}/third_party/libjpeg-turbo/lib/libturbojpeg.so)
    add_library(jpeg SHARED IMPORTED)
    set_target_properties(jpeg PROPERTIES IMPORTED_LOCATION
            ${TOP_DIR}/third_party/libjpeg-turbo/lib/libjpeg.so)
    include_directories(${TOP_DIR}/third_party/libjpeg-turbo/include)
    add_compile_definitions(ENABLE_ANDROID)
    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/minddata)
endif()

if (BUILD_DEVICE)
    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src)
    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/benchmark)
    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test)
...
...
mindspore/lite/minddata/CMakeLists.txt
new file (mode 100644)
set(MINDDATA_DIR ${CCSRC_DIR}/minddata/dataset)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-deprecated-declarations")
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb")

if (CMAKE_BUILD_TYPE EQUAL "DEBUG")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s")
endif()

AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/core MINDDATA_CORE_SRC_FILES)
list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES "${MINDDATA_DIR}/core/client.cc")

AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels MINDDATA_KERNELS_SRC_FILES)
list(REMOVE_ITEM MINDDATA_KERNELS_SRC_FILES "${MINDDATA_DIR}/kernels/py_func_op.cc")

AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels/image MINDDATA_KERNELS_IMAGE_SRC_FILES)
AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels/data MINDDATA_KERNELS_DATA_SRC_FILES)

add_library(minddata-eager OBJECT
        ${MINDDATA_DIR}/api/de_tensor.cc
        ${MINDDATA_DIR}/api/execute.cc
        )

add_library(minddata-lite SHARED
        ${MINDDATA_CORE_SRC_FILES}
        ${MINDDATA_KERNELS_SRC_FILES}
        ${MINDDATA_KERNELS_IMAGE_SRC_FILES}
        ${MINDDATA_KERNELS_DATA_SRC_FILES}
        ${MINDDATA_DIR}/util/status.cc
        ${MINDDATA_DIR}/util/memory_pool.cc
        ${MINDDATA_DIR}/util/path.cc
        ${MINDDATA_DIR}/api/transforms.cc
        ${CORE_DIR}/utils/log_adapter.cc
        ${CCSRC_DIR}/gvar/logging_level.cc
        )

target_link_libraries(minddata-lite
        securec
        jpeg-turbo
        jpeg
        opencv_core
        opencv_imgcodecs
        opencv_imgproc
        mindspore::json
        )
\ No newline at end of file
mindspore/lite/src/CMakeLists.txt
...
...
@@ -80,5 +80,12 @@ target_link_libraries(mindspore-lite
        )
add_subdirectory(runtime/kernel/arm)

if (BUILD_MINDDATA)
    target_link_libraries(mindspore-lite minddata-eager minddata-lite)
    if (PLATFORM_ARM32 OR PLATFORM_ARM64)
        target_link_libraries(mindspore-lite log)
    endif()
endif()

add_subdirectory(ops)
mindspore/lite/test/CMakeLists.txt
...
...
@@ -129,6 +129,15 @@ if (SUPPORT_GPU)
        ${LITE_DIR}/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc
        )
endif()

### minddata lite
if (BUILD_MINDDATA)
    include_directories(${CCSRC_DIR}/minddata)
    set(DATASET_TEST_DIR ${TEST_DIR}/ut/src/dataset)
    set(TEST_MINDDATA_SRC
            ${DATASET_TEST_DIR}/de_tensor_test.cc
            ${DATASET_TEST_DIR}/eager_test.cc
            )
endif()

### runtime framework
file(GLOB_RECURSE OPS_SRC ${LITE_DIR}/src/ops/*.cc)
set(TEST_LITE_SRC
...
@@ -245,6 +254,7 @@ file(GLOB_RECURSE TEST_CASE_KERNEL_SRC
set(TEST_SRC
        ${TEST_LITE_SRC}
        ${TEST_MINDDATA_SRC}
        ${TEST_CASE_KERNEL_SRC}
        ${TEST_DIR}/common/common_test.cc
        ${TEST_DIR}/main.cc
...
@@ -284,6 +294,15 @@ endif ()
add_executable(lite-test ${TEST_SRC})
target_link_libraries(lite-test dl ${SECUREC_LIBRARY} ${GTEST_LIBRARY} mindspore::json)
if (BUILD_MINDDATA)
    target_link_libraries(lite-test
            minddata-lite
            minddata-eager
            )
    if (PLATFORM_ARM32 OR PLATFORM_ARM64)
        target_link_libraries(lite-test log)
    endif()
endif()
if (BUILD_CONVERTER)
    target_link_libraries(lite-test
            anf_exporter_mid
...
...
mindspore/lite/test/run_test.sh
100644 → 100755
...
...
@@ -7,6 +7,12 @@ mkdir -pv ${CUR_DIR}/do_test
cd ${CUR_DIR}/do_test
cp ${BUILD_DIR}/test/lite-test ./
cp -r ${CUR_DIR}/ut/src/runtime/kernel/arm/test_data/* ./
## prepare data for dataset
TEST_DATA_DIR=${CUR_DIR}/../../../tests/ut/data/dataset/
cp -fr $TEST_DATA_DIR/testPK ./data

./lite-test --gtest_filter="*MindDataTestTensorDE*"
./lite-test --gtest_filter="*MindDataTestEager*"
./lite-test --gtest_filter="*TestHebing*"
...
...
mindspore/lite/test/ut/src/dataset/de_tensor_test.cc
new file (mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <string>
#include "common/common_test.h"
#include "gtest/gtest.h"
#include "./securec.h"
#include "dataset/core/tensor.h"
#include "dataset/core/cv_tensor.h"
#include "dataset/core/data_type.h"
#include "mindspore/lite/src/ir/tensor.h"
using MSTensor = mindspore::tensor::MSTensor;
using DETensor = mindspore::tensor::DETensor;
using LiteTensor = mindspore::lite::tensor::LiteTensor;
using Tensor = mindspore::dataset::Tensor;
using DataType = mindspore::dataset::DataType;
using TensorShape = mindspore::dataset::TensorShape;

class MindDataTestTensorDE : public mindspore::Common {
 public:
  MindDataTestTensorDE() {}
};

TEST_F(MindDataTestTensorDE, MSTensorBasic) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
  ASSERT_EQ(t == std::dynamic_pointer_cast<DETensor>(ms_tensor)->tensor(), true);
}

TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<DETensor>(new DETensor(t));
  std::shared_ptr<MSTensor> lite_ms_tensor =
      std::shared_ptr<MSTensor>(std::dynamic_pointer_cast<DETensor>(ms_tensor)->ConvertToLiteTensor());
  // check if the lite_ms_tensor is the derived LiteTensor
  LiteTensor *lite_tensor = static_cast<LiteTensor *>(lite_ms_tensor.get());
  ASSERT_EQ(lite_tensor != nullptr, true);
}

TEST_F(MindDataTestTensorDE, MSTensorShape) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
  ASSERT_EQ(ms_tensor->DimensionSize(0) == 2, true);
  ASSERT_EQ(ms_tensor->DimensionSize(1) == 3, true);
  ms_tensor->set_shape(std::vector<int>{3, 2});
  ASSERT_EQ(ms_tensor->DimensionSize(0) == 3, true);
  ASSERT_EQ(ms_tensor->DimensionSize(1) == 2, true);
  ms_tensor->set_shape(std::vector<int>{6});
  ASSERT_EQ(ms_tensor->DimensionSize(0) == 6, true);
}

TEST_F(MindDataTestTensorDE, MSTensorSize) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
  ASSERT_EQ(ms_tensor->ElementsNum() == 6, true);
  ASSERT_EQ(ms_tensor->Size() == 24, true);
}

TEST_F(MindDataTestTensorDE, MSTensorDataType) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
  ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeFloat32, true);
  ms_tensor->set_data_type(mindspore::TypeId::kNumberTypeInt32);
  ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeInt32, true);
  ASSERT_EQ(std::dynamic_pointer_cast<DETensor>(ms_tensor)->tensor()->type() == DataType::DE_INT32, true);
}

TEST_F(MindDataTestTensorDE, MSTensorMutableData) {
  std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
  std::shared_ptr<Tensor> t;
  Tensor::CreateFromVector(x, TensorShape({2, 2}), &t);
  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
  float *data = static_cast<float *>(ms_tensor->MutableData());
  std::vector<float> tensor_vec(data, data + ms_tensor->ElementsNum());
  ASSERT_EQ(x == tensor_vec, true);
}

TEST_F(MindDataTestTensorDE, MSTensorHash) {
  std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
  std::shared_ptr<Tensor> t;
  Tensor::CreateFromVector(x, TensorShape({2, 2}), &t);
  auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
  ASSERT_EQ(ms_tensor->hash() == 11093771382437, true);
}
mindspore/lite/test/ut/src/dataset/eager_test.cc
new file (mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <chrono>
#include "common/common_test.h"
#include "gtest/gtest.h"
#include "./securec.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/core/config_manager.h"
#include "minddata/dataset/include/datasets.h"
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/util/path.h"
using MSTensor = mindspore::tensor::MSTensor;
using DETensor = mindspore::tensor::DETensor;
using mindspore::dataset::api::vision::Decode;
using mindspore::dataset::api::vision::Normalize;
using mindspore::dataset::api::vision::Resize;
using Execute = mindspore::dataset::api::Execute;
using Path = mindspore::dataset::Path;

class MindDataTestEager : public mindspore::Common {
 public:
  MindDataTestEager() {}
};

TEST_F(MindDataTestEager, Test1) {
#if defined(ENABLE_ARM64) || defined(ENABLE_ARM32)
  std::string in_dir = "/sdcard/data/testPK/data/class1";
#else
  std::string in_dir = "data/testPK/data/class1";
#endif
  Path base_dir = Path(in_dir);
  MS_LOG(WARNING) << base_dir.toString() << ".";
  if (!base_dir.IsDirectory() || !base_dir.Exists()) {
    MS_LOG(INFO) << "Input dir is not a directory or doesn't exist" << ".";
  }
  auto t_start = std::chrono::high_resolution_clock::now();
  // check if output_dir exists and create it if it does not exist
  // iterate over in dir and create json for all images
  auto dir_it = Path::DirIterator::OpenDirectory(&base_dir);
  while (dir_it->hasNext()) {
    Path v = dir_it->next();
    MS_LOG(WARNING) << v.toString() << ".";
    std::shared_ptr<MSTensor> image = std::shared_ptr<MSTensor>(DETensor::CreateTensor(v.toString()));

    image = Execute(Decode())(image);
    EXPECT_TRUE(image != nullptr);
    image = Execute(Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image);
    EXPECT_TRUE(image != nullptr);
    image = Execute(Resize({224, 224}))(image);
    EXPECT_TRUE(image != nullptr);
    EXPECT_EQ(image->DimensionSize(0), 224);
    EXPECT_EQ(image->DimensionSize(1), 224);
  }
  auto t_end = std::chrono::high_resolution_clock::now();
  double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
  MS_LOG(INFO) << "duration: " << elapsed_time_ms << " ms\n";
}
third_party/eigen @ daf9bbec
Subproject commit daf9bbeca26e98da2eed0058835cbb04e0a30ad8

third_party/libjpeg-turbo @ b443c541
Subproject commit b443c541b9a6fdcac214f9f003de0aa13e480ac1

third_party/opencv @ bda89a64
Subproject commit bda89a6469aa79ecd8713967916bd754bff1d931