Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
50dcb79b
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
50dcb79b
编写于
7月 30, 2020
作者:
E
ervinzhang
提交者:
ervinzhang
8月 04, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
added MindData lite
上级
49c1eea1
变更
21
隐藏空白更改
内联
并排
Showing
21 changed files
with
789 additions
and
8 deletions
+789
-8
.gitmodules
.gitmodules
+10
-0
build.sh
build.sh
+46
-0
mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt
mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt
+2
-0
mindspore/ccsrc/minddata/dataset/api/de_tensor.cc
mindspore/ccsrc/minddata/dataset/api/de_tensor.cc
+188
-0
mindspore/ccsrc/minddata/dataset/api/execute.cc
mindspore/ccsrc/minddata/dataset/api/execute.cc
+55
-0
mindspore/ccsrc/minddata/dataset/core/client.h
mindspore/ccsrc/minddata/dataset/core/client.h
+3
-0
mindspore/ccsrc/minddata/dataset/core/tensor.cc
mindspore/ccsrc/minddata/dataset/core/tensor.cc
+4
-0
mindspore/ccsrc/minddata/dataset/core/tensor.h
mindspore/ccsrc/minddata/dataset/core/tensor.h
+9
-0
mindspore/ccsrc/minddata/dataset/include/de_tensor.h
mindspore/ccsrc/minddata/dataset/include/de_tensor.h
+53
-0
mindspore/ccsrc/minddata/dataset/include/execute.h
mindspore/ccsrc/minddata/dataset/include/execute.h
+51
-0
mindspore/ccsrc/minddata/dataset/include/tensor.h
mindspore/ccsrc/minddata/dataset/include/tensor.h
+17
-7
mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc
...src/minddata/dataset/kernels/image/resize_with_bbox_op.cc
+0
-1
mindspore/lite/CMakeLists.txt
mindspore/lite/CMakeLists.txt
+27
-0
mindspore/lite/minddata/CMakeLists.txt
mindspore/lite/minddata/CMakeLists.txt
+44
-0
mindspore/lite/src/CMakeLists.txt
mindspore/lite/src/CMakeLists.txt
+4
-0
mindspore/lite/test/CMakeLists.txt
mindspore/lite/test/CMakeLists.txt
+10
-0
mindspore/lite/test/dataset/de_tensor_test.cc
mindspore/lite/test/dataset/de_tensor_test.cc
+98
-0
mindspore/lite/test/dataset/eager_test.cc
mindspore/lite/test/dataset/eager_test.cc
+165
-0
third_party/eigen
third_party/eigen
+1
-0
third_party/libjpeg-turbo
third_party/libjpeg-turbo
+1
-0
third_party/opencv
third_party/opencv
+1
-0
未找到文件。
.gitmodules
浏览文件 @
50dcb79b
...
...
@@ -24,3 +24,13 @@
[submodule "third_party/OpenCL-Headers"]
path = third_party/OpenCL-Headers
url = https://github.com/KhronosGroup/OpenCL-Headers.git
[submodule "third_party/opencv"]
path = third_party/opencv
url = https://github.com/opencv/opencv.git
[submodule "third_party/eigen"]
path = third_party/eigen
url = https://gitlab.com/libeigen/eigen.git
[submodule "third_party/libjpeg-turbo"]
path = third_party/libjpeg-turbo
url = https://github.com/libjpeg-turbo/libjpeg-turbo.git
ignore = dirty
build.sh
浏览文件 @
50dcb79b
...
...
@@ -519,6 +519,50 @@ build_opencl() {
fi
}
# Build the OpenCV submodule (core, imgcodecs, imgproc only) for minddata lite.
# Skipped entirely when INC_BUILD != "off" (incremental build reuses artifacts).
build_opencv() {
    # Quote ${BASEPATH} and fail fast if cd fails — an unquoted path with
    # spaces, or a missing directory, would otherwise run the build in the
    # wrong location.
    cd "${BASEPATH}" || exit 1
    if [[ "${INC_BUILD}" == "off" ]]; then
        git submodule update --init --recursive third_party/opencv
        cd "${BASEPATH}/third_party/opencv" || exit 1
        rm -rf build && mkdir -p build && cd build && \
            cmake ${CMAKE_MINDDATA_ARGS} -DBUILD_SHARED_LIBS=ON -DBUILD_ANDROID_PROJECTS=OFF \
                  -DBUILD_LIST=core,imgcodecs,imgproc -DBUILD_ZLIB=ON .. && \
            make -j$THREAD_NUM
    fi
}
# Build and install the libjpeg-turbo submodule for minddata lite.
# Skipped when INC_BUILD != "off".
build_jpeg_turbo() {
    # Quote ${BASEPATH} and check cd results so a bad path aborts the build
    # instead of compiling in the wrong directory.
    cd "${BASEPATH}" || exit 1
    if [[ "${INC_BUILD}" == "off" ]]; then
        git submodule update --init --recursive third_party/libjpeg-turbo
        cd "${BASEPATH}/third_party/libjpeg-turbo" || exit 1
        rm -rf build && mkdir -p build && cd build && \
            cmake ${CMAKE_MINDDATA_ARGS} -DCMAKE_BUILD_TYPE=Release \
                  -DCMAKE_INSTALL_PREFIX="${BASEPATH}/third_party/libjpeg-turbo" .. && \
            make -j$THREAD_NUM && make install
    fi
}
# Fetch the Eigen submodule (header-only: checkout is the whole "build").
build_eigen() {
    # Quote the path and abort on a failed cd.
    cd "${BASEPATH}" || exit 1
    git submodule update --init --recursive third_party/eigen
}
# Assemble CMAKE_MINDDATA_ARGS for the target platform, then build the
# three minddata lite third-party dependencies (OpenCV, Eigen, jpeg-turbo).
# NOTE(review): reconstructed from a garbled paste — verify the exact
# quoting of CMAKE_MINDDATA_ARGS against the original script.
build_minddata_lite_deps()
{
    echo "start build minddata lite project"
    if [[ "${LITE_PLATFORM}" == "arm64" ]]; then
        # Android arm64 cross build via the NDK toolchain file.
        CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DANDROID_NATIVE_API_LEVEL=19 \
          -DANDROID_NDK=${ANDROID_NDK} -DANDROID_ABI=arm64-v8a -DANDROID_TOOLCHAIN_NAME=aarch64-linux-android-clang \
          -DANDROID_STL=c++_shared -DCMAKE_BUILD_TYPE=${BUILD_TYPE}"
    elif [[ "${LITE_PLATFORM}" == "arm32" ]]; then
        # Android arm32 cross build.
        CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DANDROID_NATIVE_API_LEVEL=19 \
          -DANDROID_NDK=${ANDROID_NDK} -DANDROID_ABI=armeabi-v7a -DANDROID_TOOLCHAIN_NAME=clang \
          -DANDROID_STL=c++_shared -DCMAKE_BUILD_TYPE=${BUILD_TYPE}"
    else
        # Host build: only the build type needs forwarding.
        CMAKE_MINDDATA_ARGS="-DCMAKE_BUILD_TYPE=${BUILD_TYPE}"
    fi
    build_opencv
    build_eigen
    build_jpeg_turbo
}
build_lite
()
{
echo
"start build mindspore lite project"
...
...
@@ -533,6 +577,8 @@ build_lite()
build_flatbuffer
build_gtest
build_minddata_lite_deps
cd
"
${
BASEPATH
}
/mindspore/lite"
if
[[
"
${
INC_BUILD
}
"
==
"off"
]]
;
then
rm
-rf
build
...
...
mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt
浏览文件 @
50dcb79b
...
...
@@ -13,4 +13,6 @@ add_library(cpp-API OBJECT
iterator.cc
transforms.cc
samplers.cc
de_tensor.cc
execute.cc
)
mindspore/ccsrc/minddata/dataset/api/de_tensor.cc
0 → 100644
浏览文件 @
50dcb79b
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/core/constants.h"
#include "minddata/dataset/core/data_type.h"
#include "mindspore/core/ir/dtype/type_id.h"
#include "utils/hashing.h"
#include "mindspore/lite/src/ir/tensor.h"
namespace
mindspore
{
namespace
tensor
{
/// \brief Map a MindSpore TypeId onto the equivalent dataset-engine DataType.
/// \param[in] data_type MindSpore numeric type id.
/// \return matching dataset::DataType; DE_UNKNOWN (after logging an error)
///         for any id without a DE counterpart.
dataset::DataType MSTypeToDEType(TypeId data_type) {
  switch (data_type) {
    case kNumberTypeBool:
      return dataset::DataType(dataset::DataType::DE_BOOL);
    case kNumberTypeInt8:
      return dataset::DataType(dataset::DataType::DE_INT8);
    case kNumberTypeUInt8:
      return dataset::DataType(dataset::DataType::DE_UINT8);
    case kNumberTypeInt16:
      return dataset::DataType(dataset::DataType::DE_INT16);
    case kNumberTypeUInt16:
      return dataset::DataType(dataset::DataType::DE_UINT16);
    case kNumberTypeInt32:
      return dataset::DataType(dataset::DataType::DE_INT32);
    case kNumberTypeUInt32:
      return dataset::DataType(dataset::DataType::DE_UINT32);
    case kNumberTypeInt64:
      return dataset::DataType(dataset::DataType::DE_INT64);
    case kNumberTypeUInt64:
      return dataset::DataType(dataset::DataType::DE_UINT64);
    case kNumberTypeFloat16:
      return dataset::DataType(dataset::DataType::DE_FLOAT16);
    case kNumberTypeFloat32:
      return dataset::DataType(dataset::DataType::DE_FLOAT32);
    case kNumberTypeFloat64:
      return dataset::DataType(dataset::DataType::DE_FLOAT64);
    default:
      // Previously a silent fall-through ("maybe throw?"); log so that an
      // unsupported type is visible to the caller instead of disappearing.
      MS_LOG(ERROR) << "Unsupported TypeId for DE conversion: " << static_cast<int>(data_type);
      return dataset::DataType(dataset::DataType::DE_UNKNOWN);
  }
}
/// \brief Map a dataset-engine DataType back onto the MindSpore TypeId.
/// \param[in] data_type dataset-engine type.
/// \return matching TypeId; kTypeUnknown (after logging an error) for any
///         DE type without a MindSpore counterpart (e.g. DE_STRING).
TypeId DETypeToMSType(dataset::DataType data_type) {
  switch (data_type.value()) {
    case dataset::DataType::DE_BOOL:
      return mindspore::TypeId::kNumberTypeBool;
    case dataset::DataType::DE_INT8:
      return mindspore::TypeId::kNumberTypeInt8;
    case dataset::DataType::DE_UINT8:
      return mindspore::TypeId::kNumberTypeUInt8;
    case dataset::DataType::DE_INT16:
      return mindspore::TypeId::kNumberTypeInt16;
    case dataset::DataType::DE_UINT16:
      return mindspore::TypeId::kNumberTypeUInt16;
    case dataset::DataType::DE_INT32:
      return mindspore::TypeId::kNumberTypeInt32;
    case dataset::DataType::DE_UINT32:
      return mindspore::TypeId::kNumberTypeUInt32;
    case dataset::DataType::DE_INT64:
      return mindspore::TypeId::kNumberTypeInt64;
    case dataset::DataType::DE_UINT64:
      return mindspore::TypeId::kNumberTypeUInt64;
    case dataset::DataType::DE_FLOAT16:
      return mindspore::TypeId::kNumberTypeFloat16;
    case dataset::DataType::DE_FLOAT32:
      return mindspore::TypeId::kNumberTypeFloat32;
    case dataset::DataType::DE_FLOAT64:
      return mindspore::TypeId::kNumberTypeFloat64;
    default:
      // Previously a silent fall-through ("maybe throw?"); log the problem.
      MS_LOG(ERROR) << "Unsupported DE type for TypeId conversion: " << data_type.value();
      return kTypeUnknown;
  }
}
/// \brief Factory: allocate an empty DETensor with the given type and shape.
/// Caller owns the returned pointer.
MSTensor *DETensor::CreateTensor(TypeId data_type, const std::vector<int> &shape) {
  return new DETensor(data_type, shape);
}

/// \brief Factory: build a DETensor holding the raw bytes of a file.
/// \param[in] path file to read.
/// \return new DETensor (caller owns it), or nullptr if the file could not
///         be read. Previously the CreateFromFile status was discarded with
///         a (void) cast, which returned a DETensor wrapping a null impl.
MSTensor *DETensor::CreateTensor(const std::string &path) {
  std::shared_ptr<dataset::Tensor> t;
  dataset::Status rc = dataset::Tensor::CreateFromFile(path, &t);
  if (rc.IsError()) {
    MS_LOG(ERROR) << "Failed to create tensor from file " << path << " : " << rc.ToString();
    return nullptr;
  }
  return new DETensor(std::move(t));
}
/// \brief Construct an empty DETensor with the given MindSpore type and shape.
/// On allocation failure tensor_impl_ stays null; accessors assert on that.
DETensor::DETensor(TypeId data_type, const std::vector<int> &shape) {
  // Widen the int shape to the dataset engine's dsize_t shape.
  std::vector<dataset::dsize_t> t_shape;
  t_shape.reserve(shape.size());
  std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape),
                 [](int s) -> dataset::dsize_t { return static_cast<dataset::dsize_t>(s); });
  // Previously the CreateEmpty status was silently dropped; log failures.
  dataset::Status rc =
    dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), MSTypeToDEType(data_type), &this->tensor_impl_);
  if (rc.IsError()) {
    MS_LOG(ERROR) << "Failed to create empty tensor: " << rc.ToString();
  }
}

/// \brief Wrap an existing dataset-engine tensor (shared ownership).
DETensor::DETensor(std::shared_ptr<dataset::Tensor> tensor_ptr) : tensor_impl_(std::move(tensor_ptr)) {}
/// \brief Deep-copy this tensor into a newly allocated lite MSTensor.
/// \return new tensor (caller owns it), or nullptr on allocation/copy failure.
MSTensor *DETensor::ConvertToLiteTensor() {
  // static MSTensor::CreateTensor is only for the LiteTensor
  MSTensor *tensor = MSTensor::CreateTensor(this->data_type(), this->shape());
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate lite tensor";
    return nullptr;
  }
  MS_ASSERT(tensor->Size() == this->Size());
  // Previously the memcpy_s result was ignored; a failed copy would have
  // returned a tensor with uninitialized contents.
  if (memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size()) != 0) {
    MS_LOG(ERROR) << "memcpy_s failed while converting to lite tensor";
    delete tensor;
    return nullptr;
  }
  return tensor;
}
/// \brief Expose the wrapped dataset-engine tensor.
std::shared_ptr<dataset::Tensor> DETensor::tensor() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_;
}

/// \brief MindSpore type id of the underlying tensor.
TypeId DETensor::data_type() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return DETypeToMSType(this->tensor_impl_->type());
}

/// \brief Reinterpret the underlying buffer as a new element type.
/// Note: this re-wraps the same bytes (no numeric conversion).
/// \return the requested type on success; the current type if re-wrapping
///         failed (previously the CreateFromMemory status was ignored and a
///         null impl could be installed).
TypeId DETensor::set_data_type(TypeId data_type) {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  if (data_type != this->data_type()) {
    std::shared_ptr<dataset::Tensor> temp;
    dataset::Status rc = dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type),
                                                           this->tensor_impl_->GetBuffer(), &temp);
    if (rc.IsError()) {
      MS_LOG(ERROR) << "Failed to change data type: " << rc.ToString();
      return this->data_type();
    }
    this->tensor_impl_ = temp;
  }
  return data_type;
}
std
::
vector
<
int
>
DETensor
::
shape
()
const
{
MS_ASSERT
(
this
->
tensor_impl_
!=
nullptr
);
std
::
vector
<
dataset
::
dsize_t
>
t_shape
=
this
->
tensor_impl_
->
shape
().
AsVector
();
std
::
vector
<
int
>
shape
;
shape
.
reserve
(
t_shape
.
size
());
std
::
transform
(
t_shape
.
begin
(),
t_shape
.
end
(),
std
::
back_inserter
(
shape
),
[](
dataset
::
dsize_t
s
)
->
int
{
return
static_cast
<
int
>
(
s
);});
return
shape
;
}
size_t
DETensor
::
set_shape
(
const
std
::
vector
<
int
>
&
shape
)
{
MS_ASSERT
(
this
->
tensor_impl_
!=
nullptr
);
std
::
vector
<
dataset
::
dsize_t
>
t_shape
;
t_shape
.
reserve
(
shape
.
size
());
std
::
transform
(
shape
.
begin
(),
shape
.
end
(),
std
::
back_inserter
(
t_shape
),
[](
int
s
)
->
dataset
::
dsize_t
{
return
static_cast
<
dataset
::
dsize_t
>
(
s
);});
dataset
::
Status
rc
=
this
->
tensor_impl_
->
Reshape
(
dataset
::
TensorShape
(
t_shape
));
//TODO: what if t_shape has different size?
return
shape
.
size
();
}
int
DETensor
::
DimensionSize
(
size_t
index
)
const
{
MS_ASSERT
(
this
->
tensor_impl_
!=
nullptr
);
int
dim_size
=
-
1
;
auto
shape
=
this
->
shape
();
if
(
index
<
shape
.
size
())
{
dim_size
=
shape
[
index
];
}
else
{
MS_LOG
(
ERROR
)
<<
"Dimension index is wrong: "
<<
index
;
}
return
dim_size
;
}
int
DETensor
::
ElementsNum
()
const
{
MS_ASSERT
(
this
->
tensor_impl_
!=
nullptr
);
return
this
->
tensor_impl_
->
Size
();
}
/// \brief Hash of the tensor's type and shape (not its contents).
std::size_t DETensor::hash() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  const auto dims = this->shape();
  std::size_t seed = std::hash<int>{}(SizeToInt(this->data_type()));
  seed = hash_combine(seed, std::hash<size_t>{}(dims.size()));
  // hash all elements may costly, so only take at most 4 elements into account based on
  // some experiments.
  const size_t limit = dims.size() < 4 ? dims.size() : 4;
  for (size_t i = 0; i < limit; ++i) {
    seed = hash_combine(seed, std::hash<int>{}(dims[i]));
  }
  return seed;
}
/// \brief Size of the tensor payload in bytes.
size_t DETensor::Size() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_->SizeInBytes();
}

/// \brief Writable pointer to the underlying buffer.
/// NOTE(review): relies on DETensor having friend access to dataset::Tensor
/// for GetMutableBuffer — confirm this remains part of the intended API.
void *DETensor::MutableData() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_->GetMutableBuffer();
}
}
// namespace tensor
}
// namespace mindspore
\ No newline at end of file
mindspore/ccsrc/minddata/dataset/api/execute.cc
0 → 100644
浏览文件 @
50dcb79b
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/include/tensor.h"
#include "minddata/dataset/kernels/tensor_op.h"
namespace
mindspore
{
namespace
dataset
{
namespace
api
{
/// \brief Hold the operation to run in eager mode.
/// Note: std::move on a const& is a no-op copy, so the previous
/// op_(std::move(op)) was misleading — a plain copy is what happens.
Execute::Execute(const std::shared_ptr<TensorOperation> &op) : op_(op) {}

/// \brief Run the stored TensorOperation on one tensor.
/// \param[in] input tensor to transform (must wrap a DETensor).
/// \return transformed tensor, or nullptr on any failure.
std::shared_ptr<tensor::MSTensor> Execute::operator()(std::shared_ptr<tensor::MSTensor> input) {
  // Build the op
  if (op_ == nullptr) {
    MS_LOG(ERROR) << "Input TensorOperation is not valid";
    return nullptr;
  }
  // Guard the downcast: a null input, or an MSTensor that is not a DETensor,
  // would previously have dereferenced a null pointer.
  auto de_tensor = std::dynamic_pointer_cast<tensor::DETensor>(input);
  if (de_tensor == nullptr) {
    MS_LOG(ERROR) << "Input Tensor is not valid";
    return nullptr;
  }
  std::shared_ptr<Tensor> de_input = de_tensor->tensor();
  if (de_input == nullptr) {
    MS_LOG(ERROR) << "Input Tensor is not valid";
    return nullptr;
  }
  std::shared_ptr<TensorOp> transform = op_->Build();
  std::shared_ptr<Tensor> de_output;
  Status rc = transform->Compute(de_input, &de_output);
  if (rc.IsError()) {
    // execution failed
    MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString();
    return nullptr;
  }
  return std::shared_ptr<tensor::MSTensor>(new tensor::DETensor(std::move(de_output)));
}
}
// namespace api
}
// namespace dataset
}
// namespace mindspore
mindspore/ccsrc/minddata/dataset/core/client.h
浏览文件 @
50dcb79b
...
...
@@ -25,8 +25,11 @@
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/engine/data_schema.h"
#include "minddata/dataset/engine/dataset_iterator.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/mindrecord_op.h"
#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
#endif
#ifdef ENABLE_PYTHON
#include "minddata/dataset/engine/datasetops/barrier_op.h"
...
...
mindspore/ccsrc/minddata/dataset/core/tensor.cc
浏览文件 @
50dcb79b
...
...
@@ -213,6 +213,7 @@ Status Tensor::CreateFromNpArray(const py::array &arr, std::shared_ptr<Tensor> *
}
#endif
#ifndef ENABLE_ANDROID
Status
Tensor
::
CreateFromByteList
(
const
dataengine
::
BytesList
&
bytes_list
,
const
TensorShape
&
shape
,
TensorPtr
*
out
)
{
const
TensorAlloc
*
alloc
=
GlobalContext
::
Instance
()
->
tensor_allocator
();
*
out
=
std
::
allocate_shared
<
Tensor
>
(
*
alloc
,
TensorShape
({
static_cast
<
dsize_t
>
(
bytes_list
.
value_size
())}),
...
...
@@ -255,6 +256,7 @@ Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const
(
*
out
)
->
Reshape
(
shape
);
return
Status
::
OK
();
}
#endif
Status
Tensor
::
CreateFromFile
(
const
std
::
string
&
path
,
std
::
shared_ptr
<
Tensor
>
*
out
)
{
std
::
ifstream
fs
;
...
...
@@ -269,6 +271,7 @@ Status Tensor::CreateFromFile(const std::string &path, std::shared_ptr<Tensor> *
return
Status
::
OK
();
}
#ifndef ENABLE_ANDROID
Status
Tensor
::
CreateFromByteList
(
const
dataengine
::
BytesList
&
bytes_list
,
const
TensorShape
&
shape
,
const
DataType
&
type
,
dsize_t
pad_size
,
TensorPtr
*
out
)
{
RETURN_IF_NOT_OK
(
Tensor
::
CreateEmpty
(
shape
,
type
,
out
));
...
...
@@ -298,6 +301,7 @@ Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const
return
Status
::
OK
();
}
#endif
// Memcpy the given strided array's used part to consecutive memory
// Consider a 3-d array
...
...
mindspore/ccsrc/minddata/dataset/core/tensor.h
浏览文件 @
50dcb79b
...
...
@@ -38,12 +38,18 @@
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/util/status.h"
#include "minddata/dataset/include/de_tensor.h"
#ifndef ENABLE_ANDROID
#include "proto/example.pb.h"
#endif
#ifdef ENABLE_PYTHON
namespace
py
=
pybind11
;
#endif
namespace
mindspore
{
namespace
tensor
{
class
DETensor
;
}
// namespace tensor
namespace
dataset
{
class
Tensor
;
template
<
typename
T
>
...
...
@@ -55,6 +61,7 @@ using offset_t = uint32_t; // type of offset va
using
TensorPtr
=
std
::
shared_ptr
<
Tensor
>
;
class
Tensor
{
friend
class
tensor
::
DETensor
;
public:
Tensor
()
=
delete
;
Tensor
(
const
Tensor
&
other
)
=
delete
;
...
...
@@ -117,6 +124,7 @@ class Tensor {
static
Status
CreateFromNpArray
(
const
py
::
array
&
arr
,
TensorPtr
*
out
);
#endif
#ifndef ENABLE_ANDROID
/// Create a tensor of type DE_STRING from a BytesList.
/// \param[in] bytes_list protobuf's Bytelist
/// \param[in] shape shape of the outout tensor
...
...
@@ -134,6 +142,7 @@ class Tensor {
/// \return Status Code
static
Status
CreateFromByteList
(
const
dataengine
::
BytesList
&
bytes_list
,
const
TensorShape
&
shape
,
const
DataType
&
type
,
dsize_t
pad_size
,
TensorPtr
*
out
);
#endif
/// Create a Tensor from a given list of values.
/// \tparam type of the values to be inserted.
...
...
mindspore/ccsrc/minddata/dataset/include/de_tensor.h
0 → 100644
浏览文件 @
50dcb79b
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef DATASET_INCLUDE_DETENSOR_H_
#define DATASET_INCLUDE_DETENSOR_H_

// Include-what-you-use: the interface below names std::shared_ptr,
// std::string and std::vector directly.
#include <memory>
#include <string>
#include <vector>

#include "include/ms_tensor.h"
#include "minddata/dataset/include/tensor.h"
#include "minddata/dataset/util/status.h"

namespace mindspore {
namespace tensor {
/// \brief MSTensor implementation backed by a dataset-engine Tensor,
/// bridging minddata eager ops and the lite runtime.
class DETensor : public MSTensor {
 public:
  /// \brief Create an MSTensor with the given element type and shape.
  /// \param[in] data_type TypeId of the tensor to be created.
  /// \param[in] shape shape of the tensor to be created.
  /// \return MSTensor pointer (caller owns it).
  static MSTensor *CreateTensor(TypeId data_type, const std::vector<int> &shape);

  /// \brief Create an MSTensor holding the raw bytes of a file.
  static MSTensor *CreateTensor(const std::string &path);

  DETensor(TypeId data_type, const std::vector<int> &shape);

  explicit DETensor(std::shared_ptr<dataset::Tensor> tensor_ptr);

  ~DETensor() = default;

  /// \brief Deep-copy into a newly allocated lite MSTensor.
  MSTensor *ConvertToLiteTensor();

  /// \brief Access the wrapped dataset-engine tensor.
  std::shared_ptr<dataset::Tensor> tensor() const;

  TypeId data_type() const override;

  TypeId set_data_type(const TypeId data_type) override;

  std::vector<int> shape() const override;

  size_t set_shape(const std::vector<int> &shape) override;

  int DimensionSize(size_t index) const override;

  int ElementsNum() const override;

  std::size_t hash() const override;

  size_t Size() const override;

  void *MutableData() const override;

 protected:
  // Shared ownership of the underlying dataset-engine tensor.
  std::shared_ptr<dataset::Tensor> tensor_impl_;
};
}  // namespace tensor
}  // namespace mindspore
#endif  // DATASET_INCLUDE_DETENSOR_H_
\ No newline at end of file
mindspore/ccsrc/minddata/dataset/include/execute.h
0 → 100644
浏览文件 @
50dcb79b
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef DATASET_API_EXECUTE_H_
#define DATASET_API_EXECUTE_H_

#include <vector>
#include <memory>
#include "minddata/dataset/core/constants.h"
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/include/transforms.h"

namespace mindspore {
namespace dataset {

class TensorOp;

namespace api {

/// \brief Runs a single TensorOperation eagerly on one tensor at a time.
class Execute {
 public:
  /// \brief Constructor
  /// NOTE(review): single-argument and not explicit — confirm whether the
  /// implicit conversion from a TensorOperation pointer is intentional.
  Execute(const std::shared_ptr<TensorOperation> &op);

  /// \brief callable function to execute the TensorOperation in eager mode
  /// \param[inout] input - the tensor to be transformed
  /// \return - the output tensor, nullptr if Compute fails
  std::shared_ptr<tensor::MSTensor> operator()(std::shared_ptr<tensor::MSTensor> input);

 private:
  // The operation to build and run on each call.
  std::shared_ptr<TensorOperation> op_;
};

}  // namespace api
}  // namespace dataset
}  // namespace mindspore
#endif  // DATASET_API_EXECUTE_H_
mindspore/ccsrc/minddata/dataset/include/tensor.h
浏览文件 @
50dcb79b
...
...
@@ -38,12 +38,18 @@
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/util/status.h"
#include "minddata/dataset/include/de_tensor.h"
#ifndef ENABLE_ANDROID
#include "proto/example.pb.h"
#endif
#ifdef ENABLE_PYTHON
namespace
py
=
pybind11
;
#endif
namespace
mindspore
{
namespace
tensor
{
class
DETensor
;
}
// namespace tensor
namespace
dataset
{
class
Tensor
;
template
<
typename
T
>
...
...
@@ -55,6 +61,7 @@ using offset_t = uint32_t; // type of offset va
using
TensorPtr
=
std
::
shared_ptr
<
Tensor
>
;
class
Tensor
{
friend
class
tensor
::
DETensor
;
public:
Tensor
()
=
delete
;
Tensor
(
const
Tensor
&
other
)
=
delete
;
...
...
@@ -117,6 +124,7 @@ class Tensor {
static
Status
CreateFromNpArray
(
const
py
::
array
&
arr
,
TensorPtr
*
out
);
#endif
#ifndef ENABLE_ANDROID
/// Create a tensor of type DE_STRING from a BytesList.
/// \param[in] bytes_list protobuf's Bytelist
/// \param[in] shape shape of the outout tensor
...
...
@@ -134,6 +142,7 @@ class Tensor {
/// \return Status Code
static
Status
CreateFromByteList
(
const
dataengine
::
BytesList
&
bytes_list
,
const
TensorShape
&
shape
,
const
DataType
&
type
,
dsize_t
pad_size
,
TensorPtr
*
out
);
#endif
/// Create a Tensor from a given list of values.
/// \tparam type of the values to be inserted.
...
...
@@ -649,13 +658,6 @@ class Tensor {
unsigned
char
*
data_end_
=
nullptr
;
private:
#ifdef ENABLE_PYTHON
/// Helper function to create a tensor from Numpy array of strings
/// \param[in] arr Numpy array
/// \param[out] out Created Tensor
/// \return Status
static
Status
CreateFromNpString
(
py
::
array
arr
,
TensorPtr
*
out
);
#endif
/// Copy raw data of a array based on shape and strides to the destination pointer
/// \param dst [out] Pointer to the destination array where the content is to be copied
/// \param[in] src Pointer to the source of strided array to be copied
...
...
@@ -668,6 +670,14 @@ class Tensor {
/// const of the size of the offset variable
static
constexpr
uint8_t
kOffsetSize
=
sizeof
(
offset_t
);
#ifdef ENABLE_PYTHON
/// Helper function to create a tensor from Numpy array of strings
/// \param[in] arr Numpy array
/// \param[out] out Created Tensor
/// \return Status
static
Status
CreateFromNpString
(
py
::
array
arr
,
TensorPtr
*
out
);
#endif
};
template
<
>
inline
Tensor
::
TensorIterator
<
std
::
string_view
>
Tensor
::
end
<
std
::
string_view
>
()
{
...
...
mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc
浏览文件 @
50dcb79b
...
...
@@ -20,7 +20,6 @@
#include "minddata/dataset/kernels/image/resize_op.h"
#include "minddata/dataset/kernels/image/image_utils.h"
#include "minddata/dataset/core/cv_tensor.h"
#include "minddata/dataset/core/pybind_support.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
...
...
mindspore/lite/CMakeLists.txt
浏览文件 @
50dcb79b
...
...
@@ -33,6 +33,7 @@ option(BUILD_CONVERTER "if build converter" on)
option
(
ENABLE_FP16
"if build fp16 ops"
off
)
option
(
SUPPORT_GPU
"if support gpu"
off
)
option
(
OFFLINE_COMPILE
"if offline compile OpenCL kernel"
off
)
option
(
BUILD_MINDDATA
""
on
)
if
(
BUILD_DEVICE
)
add_compile_definitions
(
BUILD_DEVICE
)
...
...
@@ -116,6 +117,32 @@ if (BUILD_DEVICE)
set
(
CMAKE_CXX_FLAGS
"
${
CMAKE_CXX_FLAGS
}
-march=armv8.2-a+dotprod+fp16"
)
endif
()
endif
()
endif
()
if
(
BUILD_MINDDATA
)
# opencv
set
(
OpenCV_DIR
${
TOP_DIR
}
/third_party/opencv/build
)
find_package
(
OpenCV REQUIRED
)
include_directories
(
${
OpenCV_INCLUDE_DIRS
}
)
# eigen
include_directories
(
${
TOP_DIR
}
/third_party/eigen/
)
# jpeg-turbo
add_library
(
jpeg-turbo SHARED IMPORTED
)
set_target_properties
(
jpeg-turbo PROPERTIES
IMPORTED_LOCATION
${
TOP_DIR
}
/third_party/libjpeg-turbo/lib/libturbojpeg.so
)
add_library
(
jpeg SHARED IMPORTED
)
set_target_properties
(
jpeg PROPERTIES
IMPORTED_LOCATION
${
TOP_DIR
}
/third_party/libjpeg-turbo/lib/libjpeg.so
)
include_directories
(
${
TOP_DIR
}
/third_party/libjpeg-turbo/include
)
add_compile_definitions
(
ENABLE_ANDROID
)
add_compile_definitions
(
ENABLE_EAGER
)
add_subdirectory
(
${
CMAKE_CURRENT_SOURCE_DIR
}
/minddata
)
endif
()
if
(
BUILD_DEVICE
)
add_subdirectory
(
${
CMAKE_CURRENT_SOURCE_DIR
}
/src
)
add_subdirectory
(
${
CMAKE_CURRENT_SOURCE_DIR
}
/tools/benchmark
)
add_subdirectory
(
${
CMAKE_CURRENT_SOURCE_DIR
}
/test
)
...
...
mindspore/lite/minddata/CMakeLists.txt
0 → 100644
浏览文件 @
50dcb79b
# Build configuration for the minddata lite libraries:
#  - minddata-eager : object library with the eager C++ API (DETensor/Execute)
#  - minddata-lite  : shared library with the dataset core + kernels
set(MINDDATA_DIR ${CCSRC_DIR}/minddata/dataset)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-deprecated-declarations")
# Strip symbols from the shared library.
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s")

# Collect sources; drop files that need the full (non-lite) runtime.
AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/core MINDDATA_CORE_SRC_FILES)
list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES "${MINDDATA_DIR}/core/client.cc")

AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels MINDDATA_KERNELS_SRC_FILES)
# py_func_op requires Python, which lite builds do not link.
list(REMOVE_ITEM MINDDATA_KERNELS_SRC_FILES "${MINDDATA_DIR}/kernels/py_func_op.cc")

AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels/image MINDDATA_KERNELS_IMAGE_SRC_FILES)
AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels/data MINDDATA_KERNELS_DATA_SRC_FILES)

add_library(minddata-eager OBJECT
  ${MINDDATA_DIR}/api/de_tensor.cc
  ${MINDDATA_DIR}/api/execute.cc
)

add_library(minddata-lite SHARED
  ${MINDDATA_CORE_SRC_FILES}
  ${MINDDATA_KERNELS_SRC_FILES}
  ${MINDDATA_KERNELS_IMAGE_SRC_FILES}
  ${MINDDATA_KERNELS_DATA_SRC_FILES}
  ${MINDDATA_DIR}/util/status.cc
  ${MINDDATA_DIR}/util/memory_pool.cc
  ${MINDDATA_DIR}/util/path.cc
  ${MINDDATA_DIR}/api/transforms.cc
  ${CORE_DIR}/utils/log_adapter.cc
  ${CCSRC_DIR}/gvar/logging_level.cc
)

target_link_libraries(minddata-lite
  securec
  jpeg-turbo
  jpeg
  opencv_core
  opencv_imgcodecs
  opencv_imgproc
  mindspore::json
)
\ No newline at end of file
mindspore/lite/src/CMakeLists.txt
浏览文件 @
50dcb79b
...
...
@@ -80,5 +80,9 @@ target_link_libraries(mindspore-lite
)
add_subdirectory
(
runtime/kernel/arm
)
if
(
BUILD_MINDDATA
)
target_link_libraries
(
mindspore-lite minddata-eager minddata-lite log
)
endif
()
add_subdirectory
(
ops
)
mindspore/lite/test/CMakeLists.txt
浏览文件 @
50dcb79b
...
...
@@ -129,6 +129,15 @@ if (SUPPORT_GPU)
${
LITE_DIR
}
/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc
)
endif
()
### minddata lite
if
(
BUILD_MINDDATA
)
include_directories
(
${
CCSRC_DIR
}
/minddata
)
set
(
DATASET_TEST_DIR
${
CMAKE_CURRENT_SOURCE_DIR
}
/dataset
)
set
(
TEST_MINDDATA_SRC
${
DATASET_TEST_DIR
}
/de_tensor_test.cc
${
DATASET_TEST_DIR
}
/eager_test.cc
)
endif
()
### runtime framework
file
(
GLOB_RECURSE OPS_SRC
${
LITE_DIR
}
/src/ops/*.cc
)
set
(
TEST_LITE_SRC
...
...
@@ -245,6 +254,7 @@ file(GLOB_RECURSE TEST_CASE_KERNEL_SRC
set
(
TEST_SRC
${
TEST_LITE_SRC
}
${
TEST_MINDDATA_SRC
}
${
TEST_CASE_KERNEL_SRC
}
${
TEST_DIR
}
/common/common_test.cc
${
TEST_DIR
}
/main.cc
...
...
mindspore/lite/test/dataset/de_tensor_test.cc
0 → 100644
浏览文件 @
50dcb79b
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <string>
#include "common/common_test.h"
#include "gtest/gtest.h"
#include "securec.h"
#include "dataset/core/tensor.h"
#include "dataset/core/cv_tensor.h"
#include "dataset/core/data_type.h"
#include "mindspore/lite/src/ir/tensor.h"
using
namespace
mindspore
::
dataset
;
// Test fixture for the DETensor <-> MSTensor bridge; no per-test state.
class MindDataTestTensorDE : public UT::Common {
 public:
  MindDataTestTensorDE() {}
};
// A DETensor must hand back exactly the dataset tensor it wraps.
TEST_F(MindDataTestTensorDE, MSTensorBasic) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
  ASSERT_EQ(t == std::dynamic_pointer_cast<mindspore::tensor::DETensor>(ms_tensor)->tensor(), true);
}

// ConvertToLiteTensor must produce a tensor of the derived LiteTensor type.
TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<mindspore::tensor::DETensor>(new mindspore::tensor::DETensor(t));
  std::shared_ptr<mindspore::tensor::MSTensor> lite_ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(
    std::dynamic_pointer_cast<mindspore::tensor::DETensor>(ms_tensor)->ConvertToLiteTensor());
  // check if the lite_ms_tensor is the derived LiteTensor.
  // Fixed: the original used static_cast, which never yields nullptr for a
  // non-null input, so the ASSERT below could not fail even for a wrong
  // dynamic type. dynamic_cast performs the intended runtime check.
  mindspore::tensor::LiteTensor *lite_tensor = dynamic_cast<mindspore::tensor::LiteTensor *>(lite_ms_tensor.get());
  ASSERT_EQ(lite_tensor != nullptr, true);
}
// shape()/set_shape() round-trip: per-dimension sizes must track reshapes.
TEST_F(MindDataTestTensorDE, MSTensorShape) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
  ASSERT_EQ(ms_tensor->DimensionSize(0) == 2, true);
  ASSERT_EQ(ms_tensor->DimensionSize(1) == 3, true);
  // Reshape 2x3 -> 3x2, then flatten to 6.
  ms_tensor->set_shape(std::vector<int>{3, 2});
  ASSERT_EQ(ms_tensor->DimensionSize(0) == 3, true);
  ASSERT_EQ(ms_tensor->DimensionSize(1) == 2, true);
  ms_tensor->set_shape(std::vector<int>{6});
  ASSERT_EQ(ms_tensor->DimensionSize(0) == 6, true);
}

// ElementsNum counts elements; Size counts bytes (6 floats = 24 bytes).
TEST_F(MindDataTestTensorDE, MSTensorSize) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
  ASSERT_EQ(ms_tensor->ElementsNum() == 6, true);
  ASSERT_EQ(ms_tensor->Size() == 24, true);
}
TEST_F
(
MindDataTestTensorDE
,
MSTensorDataType
)
{
std
::
shared_ptr
<
Tensor
>
t
=
std
::
make_shared
<
Tensor
>
(
TensorShape
({
2
,
3
}),
DataType
(
DataType
::
DE_FLOAT32
));
auto
ms_tensor
=
std
::
shared_ptr
<
mindspore
::
tensor
::
MSTensor
>
(
new
mindspore
::
tensor
::
DETensor
(
t
));
ASSERT_EQ
(
ms_tensor
->
data_type
()
==
mindspore
::
TypeId
::
kNumberTypeFloat32
,
true
);
ms_tensor
->
set_data_type
(
mindspore
::
TypeId
::
kNumberTypeInt32
);
ASSERT_EQ
(
ms_tensor
->
data_type
()
==
mindspore
::
TypeId
::
kNumberTypeInt32
,
true
);
ASSERT_EQ
(
std
::
dynamic_pointer_cast
<
mindspore
::
tensor
::
DETensor
>
(
ms_tensor
)
->
tensor
()
->
type
()
==
DataType
::
DE_INT32
,
true
);
}
// Verify that MutableData() exposes the tensor's underlying buffer and that
// the buffer contents round-trip the values the tensor was created from.
TEST_F(MindDataTestTensorDE, MSTensorMutableData) {
  std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
  std::shared_ptr<Tensor> t;
  Tensor::CreateTensor(&t, x, TensorShape({2, 2}));
  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
  float *data = static_cast<float *>(ms_tensor->MutableData());
  // Guard before pointer arithmetic: a null buffer would otherwise crash
  // the whole test binary instead of failing this test cleanly.
  ASSERT_NE(data, nullptr);
  std::vector<float> tensor_vec(data, data + ms_tensor->ElementsNum());
  ASSERT_EQ(x, tensor_vec);
  // TODO: add set_data_type after implementing it
}
// Verify the tensor hash against platform-specific golden values.
// The expected hash differs between arm64 and other targets
// (presumably due to word-size/endianness effects in the hash — the
// golden values are taken as-is from the original test).
TEST_F(MindDataTestTensorDE, MSTensorHash) {
  std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
  std::shared_ptr<Tensor> t;
  Tensor::CreateTensor(&t, x, TensorShape({2, 2}));
  auto ms_tensor = std::shared_ptr<mindspore::tensor::MSTensor>(new mindspore::tensor::DETensor(t));
#ifdef ENABLE_ARM64
  ASSERT_EQ(ms_tensor->hash(), 11093771382437);  // arm64
#else
  ASSERT_EQ(ms_tensor->hash(), 11093825635904);
#endif
}
\ No newline at end of file
mindspore/lite/test/dataset/eager_test.cc
0 → 100644
浏览文件 @
50dcb79b
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <chrono>
#include "common/common_test.h"
#include "gtest/gtest.h"
#include "securec.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/core/config_manager.h"
#include "minddata/dataset/include/datasets.h"
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/util/path.h"
using
namespace
mindspore
::
dataset
;
using
namespace
mindspore
::
dataset
::
api
;
using
namespace
mindspore
;
// Test fixture for MindData eager-mode (Execute) tests.
class MindDataTestEager : public UT::Common {
 public:
  // Explicitly-defaulted: keeps the class trivially constructible where
  // possible (clang-tidy modernize-use-equals-default).
  MindDataTestEager() = default;
};
// Eager-mode pipeline smoke test: for every file in a fixed on-device image
// directory, run Decode -> Normalize -> Resize(224x224) one op at a time and
// check the output dimensions. Also logs the total wall-clock duration.
TEST_F(MindDataTestEager, Test1) {
  // NOTE(review): hard-coded device path; assumes test data was pushed to
  // the device beforehand — confirm against the lite test setup.
  std::string in_dir = "/sdcard/data/testPK/data/class1";
  Path base_dir = Path(in_dir);
  MS_LOG(WARNING) << base_dir.toString() << ".";
  if (!base_dir.IsDirectory() || !base_dir.Exists()) {
    MS_LOG(INFO) << "Input dir is not a directory or doesn't exist" << ".";
    // Fail fast: the original fell through and dereferenced a null
    // directory iterator below, crashing the test binary.
    FAIL();
  }
  auto t_start = std::chrono::high_resolution_clock::now();
  // check if output_dir exists and create it if it does not exist
  // iterate over in dir and create json for all images
  auto dir_it = Path::DirIterator::OpenDirectory(&base_dir);
  // OpenDirectory can still fail for an existing directory (permissions).
  ASSERT_TRUE(dir_it != nullptr);
  while (dir_it->hasNext()) {
    Path v = dir_it->next();
    MS_LOG(WARNING) << v.toString() << ".";
    std::shared_ptr<tensor::MSTensor> image =
      std::shared_ptr<tensor::MSTensor>(tensor::DETensor::CreateTensor(v.toString()));

    image = Execute(vision::Decode())(image);
    EXPECT_TRUE(image != nullptr);
    image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image);
    EXPECT_TRUE(image != nullptr);
    image = Execute(vision::Resize({224, 224}))(image);
    // Fatal check: the DimensionSize calls below dereference `image`,
    // so a non-fatal EXPECT would crash on a null result.
    ASSERT_TRUE(image != nullptr);
    EXPECT_EQ(image->DimensionSize(0), 224);
    EXPECT_EQ(image->DimensionSize(1), 224);
  }
  auto t_end = std::chrono::high_resolution_clock::now();
  double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
  MS_LOG(INFO) << "duration: " << elapsed_time_ms << " ms\n";
}
/*
TEST_F(MindDataTestEager, Test2) {
// string dir for image folder
std::string in_dir = datasets_root_path_ + "/testPK/data";
// run dataset with decode = on
std::shared_ptr<Dataset> ds = ImageFolder(in_dir, true, RandomSampler(false));
std::shared_ptr<TensorOperation> normalize_op = vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0});
EXPECT_TRUE(normalize_op != nullptr);
std::shared_ptr<TensorOperation> resize_op = vision::Resize({224, 224});
EXPECT_TRUE(resize_op != nullptr);
ds = ds->Map({normalize_op, resize_op});
EXPECT_TRUE(ds != nullptr);
// Create an iterator over the result of the above dataset
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter = ds->CreateIterator();
EXPECT_TRUE(iter != nullptr);
// Iterate the dataset and get each row
std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
iter->GetNextRow(&row);
uint64_t i = 0;
while (row.size() != 0) {
i++;
auto image = row["image"];
MS_LOG(INFO) << "Tensor image shape: " << image->shape();
iter->GetNextRow(&row);
}
MS_LOG(WARNING) << i << ".";
iter->Stop();
}
TEST_F(MindDataTestEager, Test3) {
// string dir for image folder
ConfigManager cm = ConfigManager();
cm.set_num_parallel_workers(1);
std::string in_dir = datasets_root_path_ + "/testPK/data";
// run dataset with decode = on
std::shared_ptr<Dataset> ds = ImageFolder(in_dir, true, RandomSampler(false));
std::shared_ptr<TensorOperation> normalize_op = vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0});
EXPECT_TRUE(normalize_op != nullptr);
std::shared_ptr<TensorOperation> resize_op = vision::Resize({224, 224});
EXPECT_TRUE(resize_op != nullptr);
ds = ds->Map({normalize_op, resize_op});
EXPECT_TRUE(ds != nullptr);
// Create an iterator over the result of the above dataset
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter = ds->CreateIterator();
EXPECT_TRUE(iter != nullptr);
// Iterate the dataset and get each row
std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
iter->GetNextRow(&row);
uint64_t i = 0;
while (row.size() != 0) {
i++;
auto image = row["image"];
MS_LOG(INFO) << "Tensor image shape: " << image->shape();
iter->GetNextRow(&row);
}
MS_LOG(WARNING) << i << ".";
iter->Stop();
}
TEST_F(MindDataTestEager, Test4) {
// string dir for image folder
ConfigManager cm = ConfigManager();
cm.set_num_parallel_workers(1);
std::string in_dir = datasets_root_path_ + "/testPK/data";
// run dataset with decode = on
std::shared_ptr<Dataset> ds = ImageFolder(in_dir, true, RandomSampler(false));
// Create an iterator over the result of the above dataset
// This will trigger the creation of the Execution Tree and launch it.
std::shared_ptr<Iterator> iter = ds->CreateIterator();
EXPECT_TRUE(iter != nullptr);
// Iterate the dataset and get each row
std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
iter->GetNextRow(&row);
uint64_t i = 0;
while (row.size() != 0) {
i++;
auto image = row["image"];
image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image);
EXPECT_TRUE(image != nullptr);
image = Execute(vision::Resize({224, 224}))(image);
EXPECT_TRUE(image != nullptr);
MS_LOG(INFO) << "Tensor image shape: " << image->shape();
iter->GetNextRow(&row);
}
MS_LOG(WARNING) << i << ".";
iter->Stop();
}
*/
eigen
@
daf9bbec
Subproject commit daf9bbeca26e98da2eed0058835cbb04e0a30ad8
libjpeg-turbo
@
b443c541
Subproject commit b443c541b9a6fdcac214f9f003de0aa13e480ac1
opencv
@
bda89a64
Subproject commit bda89a6469aa79ecd8713967916bd754bff1d931
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录