机器未来 / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source project)
Commit 0626636d
Authored on July 12, 2018 by Luo Tao

Merge branch 'develop' into demo

Parents: 24ced1d0, 3c4f04b7

Showing 10 changed files with 359 additions and 16 deletions (+359 / -16)
Changed files:
  paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc   +13 / -2
  paddle/fluid/framework/details/threaded_ssa_graph_executor.cc         +11 / -4
  paddle/fluid/framework/details/threaded_ssa_graph_executor.h          +3  / -0
  paddle/fluid/operators/CMakeLists.txt                                  +1  / -0
  paddle/fluid/operators/reader/create_batch_reader_op.cc               +5  / -5
  paddle/fluid/operators/unsqueeze_op.cc                                 +191 / -0
  paddle/fluid/pybind/pybind.cc                                          +9  / -0
  python/CMakeLists.txt                                                  +12 / -5
  python/paddle/fluid/__init__.py                                        +3  / -0
  python/paddle/fluid/tests/unittests/test_unsqueeze_op.py               +111 / -0
paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc

@@ -13,6 +13,7 @@
 // limitations under the License.

 #include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
+#include <stdexcept>
 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/executor.h"

@@ -53,8 +54,14 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
       }
     }
   }
-  auto fetch_data = underlying_executor_->Run(fetch_tensors);
+  std::vector<framework::LoDTensor> fetch_data;
+  std::exception_ptr eptr;
+  try {
+    fetch_data = underlying_executor_->Run(fetch_tensors);
+  } catch (...) {
+    eptr = std::current_exception();
+  }

   drop_scope_counter_ += 1;
   if (!fetch_tensors.empty() ||
       drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) {

@@ -69,7 +76,11 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
       scope->DeleteScope(local_scope);
     }
   }
-  return fetch_data;
+  if (eptr) {
+    std::rethrow_exception(eptr);
+  } else {
+    return fetch_data;
+  }
 }
 }  // namespace details
 }  // namespace framework
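The change above defers any exception thrown by the underlying executor until the local scopes have been cleaned up, then re-raises it. A minimal standalone sketch of that std::exception_ptr capture-and-rethrow pattern (run_guarded and the printed "cleanup" are placeholders, not Paddle APIs):

#include <exception>
#include <iostream>
#include <stdexcept>

int run_guarded() {
  int result = 0;
  std::exception_ptr eptr;
  try {
    // Stand-in for underlying_executor_->Run(fetch_tensors).
    throw std::runtime_error("worker failed");
  } catch (...) {
    eptr = std::current_exception();  // remember the error, do not unwind yet
  }

  // Cleanup that must run even on failure (scope deletion in the real code).
  std::cout << "cleanup done\n";

  if (eptr) {
    std::rethrow_exception(eptr);  // surface the original exception to the caller
  }
  return result;
}

int main() {
  try {
    run_guarded();
  } catch (const std::exception &e) {
    std::cout << "caught: " << e.what() << "\n";
  }
  return 0;
}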
paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

@@ -78,6 +78,10 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
     set.clear();
   };

+  // Clean run context
+  run_op_futures_.clear();
+  exception_.reset();
+
   // Step 3. Execution
   while (!pending_vars.empty()) {
     // 1. Run All Ready ops

@@ -96,16 +100,19 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
     auto cur_ready_vars = ready_vars.PopAll(1, &timeout);
     if (timeout) {
-      std::lock_guard<std::mutex> l(exception_mu_);
+      std::unique_lock<std::mutex> l(exception_mu_);
       if (exception_) {
+        l.unlock();
+        for (auto &run_op_future : run_op_futures_) {
+          run_op_future.wait();
+        }
+        l.lock();
         std::exception *exp = exception_.get();
         if (dynamic_cast<platform::EOFException *>(exp)) {
           auto e = *static_cast<platform::EOFException *>(exp);
           exception_.reset();
           throw e;
         } else if (dynamic_cast<platform::EnforceNotMet *>(exp)) {
           auto e = *static_cast<platform::EnforceNotMet *>(exp);
           exception_.reset();
           throw e;
         } else {
           LOG(FATAL) << "Unknown exception.";

@@ -222,7 +229,7 @@ void ThreadedSSAGraphExecutor::RunOp(
     }
   };
   if (pool_) {
-    pool_->enqueue(op_run);
+    run_op_futures_.emplace_back(pool_->enqueue(op_run));
   } else {
     op_run();
   }
paddle/fluid/framework/details/threaded_ssa_graph_executor.h

@@ -15,6 +15,7 @@
 #pragma once

 #include <deque>
+#include <list>
 #include <string>
 #include <unordered_set>
 #include <utility>

@@ -77,6 +78,8 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor {
  private:
   ExecutionStrategy strategy_;
+  // use std::list because clear(), push_back, and for_each are O(1)
+  std::list<std::future<void>> run_op_futures_;
 };
 }  // namespace details
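The new run_op_futures_ list gives the executor a handle on every op it has enqueued, so that on error it can wait for all in-flight ops before rethrowing (as in the .cc change above). A small sketch of that wait-then-rethrow discipline, with std::async standing in for the executor's thread pool and a hypothetical first_error slot standing in for exception_:

#include <chrono>
#include <exception>
#include <future>
#include <iostream>
#include <list>
#include <stdexcept>
#include <thread>

int main() {
  // Stand-in for run_op_futures_; std::async replaces the executor's ThreadPool.
  std::list<std::future<void>> futures;
  std::exception_ptr first_error;

  for (int i = 0; i < 4; ++i) {
    futures.emplace_back(std::async(std::launch::async, [i, &first_error] {
      std::this_thread::sleep_for(std::chrono::milliseconds(10 * i));
      if (i == 2) {
        // Only this task writes first_error, so no lock is needed in the sketch.
        first_error = std::make_exception_ptr(std::runtime_error("op 2 failed"));
      }
    }));
  }

  // Mirror the executor change: wait for every in-flight op before reporting
  // the failure, so no task is still touching shared state when we throw.
  for (auto &f : futures) f.wait();
  futures.clear();

  if (first_error) {
    try {
      std::rethrow_exception(first_error);
    } catch (const std::exception &e) {
      std::cout << "caught after all ops finished: " << e.what() << "\n";
    }
  }
  return 0;
}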
paddle/fluid/operators/CMakeLists.txt

@@ -265,6 +265,7 @@ op_library(recurrent_op DEPS executor)
 op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
 op_library(cos_sim_op DEPS cos_sim_functor)
 op_library(parallel_do_op DEPS executor)
+op_library(unsqueeze_op DEPS reshape_op)
 op_library(squeeze_op DEPS reshape_op)
 if (WITH_GPU)
paddle/fluid/operators/reader/create_batch_reader_op.cc

@@ -23,7 +23,7 @@ class BatchReader : public framework::DecoratedReader {
   BatchReader(const std::shared_ptr<ReaderBase>& reader, int batch_size,
               bool discard_leftover)
       : DecoratedReader(reader),
-        batch_size_(batch_size),
+        batch_size_(static_cast<size_t>(batch_size)),
         discard_leftover_(discard_leftover) {
     buffer_.reserve(batch_size_);
   }

@@ -31,7 +31,7 @@ class BatchReader : public framework::DecoratedReader {
   void ReadNextImpl(std::vector<framework::LoDTensor>* out) override;

  private:
-  int batch_size_;
+  size_t batch_size_;
   bool discard_leftover_;
   std::vector<std::vector<framework::LoDTensor>> buffer_;
 };

@@ -78,7 +78,7 @@ class CreateBatchReaderOpMaker : public DecoratedReaderMakerBase {
 void BatchReader::ReadNextImpl(std::vector<framework::LoDTensor>* out) {
   buffer_.clear();
   buffer_.reserve(batch_size_);
-  for (int i = 0; i < batch_size_; ++i) {
+  for (size_t i = 0; i < batch_size_; ++i) {
     buffer_.push_back(std::vector<framework::LoDTensor>());
     reader_->ReadNext(&buffer_.back());
     if (buffer_.back().empty()) {

@@ -95,9 +95,9 @@ void BatchReader::ReadNextImpl(std::vector<framework::LoDTensor>* out) {
     // if buffer_ is empty, the 'out' will return as an empty vector.
     return;
   }
-  int out_num = buffer_[0].size();
+  size_t out_num = buffer_[0].size();
   out->reserve(out_num);
-  for (int j = 0; j < out_num; ++j) {
+  for (size_t j = 0; j < out_num; ++j) {
     // Merge shape and check date type
     std::type_index batch_type = buffer_[0][j].type();
     framework::DDim batch_shape = buffer_[0][j].dims();
paddle/fluid/operators/unsqueeze_op.cc (new file, mode 100644)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

class UnsqueezeOpInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of UnsqueezeOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of UnsqueezeOp should not be null.");

    const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
    const auto &x_dims = ctx->GetInputDim("X");
    // Validity Check: input tensor dims (<6).
    PADDLE_ENFORCE(x_dims.size() <= 6,
                   "Invalid dimensions, the rank of Input(X) "
                   "should be in the range of [1, 6] (Eigen limit)");

    auto out_dims = GetOutputShape(axes, x_dims);
    ctx->SetOutputDim("Out", out_dims);
    if (x_dims[0] == out_dims[0]) {
      // Only pass LoD when the first dimension of output and Input(X)
      // are the same.
      ctx->ShareLoD("X", "Out");
    }
  }

  static framework::DDim GetOutputShape(const std::vector<int> unsqz_dims,
                                        const framework::DDim &in_dims) {
    int output_size = in_dims.size() + static_cast<int>(unsqz_dims.size());
    int cur_output_size = in_dims.size();
    std::vector<int64_t> output_shape(output_size, 0);

    // Validity Check: rank range.
    PADDLE_ENFORCE(output_size <= 6,
                   "The output tensor's rank should be less than 6.");

    for (int axis : unsqz_dims) {
      int cur = axis < 0 ? axis + cur_output_size + 1 : axis;
      // Vaildity Check: the axis bound
      PADDLE_ENFORCE(
          cur >= 0 && cur <= cur_output_size,
          "The unsqueeze dims must be within range of current rank.");
      // Move old axis, and insert new axis
      for (int i = cur_output_size; i >= cur; --i) {
        if (output_shape[i] == 1) {
          // Move axis
          output_shape[i + 1] = 1;
          output_shape[i] = 0;
        }
      }
      output_shape[cur] = 1;
      // Add the output size.
      cur_output_size++;
    }

    // Make output shape
    for (int in_idx = 0, out_idx = 0; out_idx < output_size; ++out_idx) {
      if (output_shape[out_idx] == 0) {
        output_shape[out_idx] = in_dims[in_idx++];
      }
    }

    return framework::make_ddim(output_shape);
  }
};

class UnsqueezeOp : public framework::OperatorBase {
 public:
  using OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto &axes = Attr<std::vector<int>>("axes");
    auto x_dims = scope.FindVar(Input("X"))->Get<framework::LoDTensor>().dims();
    auto out_dims = UnsqueezeOpInferShape::GetOutputShape(axes, x_dims);

    framework::AttributeMap attrs;
    attrs["shape"] = framework::vectorize2int(out_dims);
    attrs["inplace"] = Attr<bool>("inplace");
    // Invoke Reshape op.
    auto reshape_op = framework::OpRegistry::CreateOp(
        "reshape", {{"X", {Input("X")}}, {"Shape", {}}},
        {{"Out", {Output("Out")}}}, attrs);
    reshape_op->Run(scope, place);
  }
};

class UnsqueezeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor). The input tensor of unsqueeze operator.");
    AddOutput("Out", "(Tensor). The output tensor of unsqueeze operator.");
    AddAttr<std::vector<int>>("axes",
                              "(std::vector<int>). List of integers,"
                              " indicating the dimensions to be inserted")
        .AddCustomChecker([](const std::vector<int> &axes) {
          PADDLE_ENFORCE(!axes.empty(),
                         "Invalid axes, The unsqueeze axes is empty.");
          // Validity Check: axes dims (<6).
          PADDLE_ENFORCE(static_cast<int>(axes.size()) < 6,
                         "Invalid dimensions, dynamic dimensions should be "
                         "within [1, 6] dimensions (Eigen limit).");
          // Validity Check: the range of unsqueeze aixs.
          for (int axis : axes) {
            PADDLE_ENFORCE(axis < 6,
                           "Invalid dimensions, input axis should be"
                           " within [1, 6] dimensions (Eigen limit).");
          }
        });
    AddAttr<bool>(
        "inplace",
        "(default: false) Unsqueeze the source tensor's shape without "
        "memory copy. When Attr(inplace) is set true, the output "
        "tensor shares memory with Input(X), otherwise, a new output "
        "tensor is created, and its data are copied from Input(x).")
        .SetDefault(false);
    AddComment(R"DOC(
    Unsqueeze Operator.

    Insert single-dimensional entries to the shape of a tensor.
    Takes one required argument axes, a list of dimensions that will be inserted.
    Dimension indices in axes are as seen in the output tensor.

    For example:
      Given a tensor such that tensor with shape [3, 4, 5],
      then Unsqueeze(tensor, axes=[0, 4]) has shape [1, 3, 4, 5, 1]
    )DOC");
  }
};

class UnsqueezeGradInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *ctx) const override {
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
    ctx->ShareLoD("X", framework::GradVarName("X"));
  }
};

class UnsqueezeGradOp : public framework::OperatorBase {
 public:
  using OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto dx_name = Output(framework::GradVarName("X"));
    auto dout_name = Input(framework::GradVarName("Out"));
    auto x_dims = scope.FindVar(Input("X"))->Get<framework::LoDTensor>().dims();

    framework::AttributeMap attrs;
    attrs["shape"] = framework::vectorize2int(x_dims);
    attrs["inplace"] = Attr<bool>("inplace");

    auto reshape_op = framework::OpRegistry::CreateOp(
        "reshape", {{"X", {dout_name}}, {"Shape", {}}}, {{"Out", {dx_name}}},
        attrs);
    reshape_op->Run(scope, place);
  }
};

}  // namespace operators
}  // namespace paddle

// Tell linker to use reshape op.
USE_OP(reshape);

namespace ops = paddle::operators;
REGISTER_OPERATOR(unsqueeze, ops::UnsqueezeOp, ops::UnsqueezeOpMaker,
                  ops::UnsqueezeOpInferShape,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(unsqueeze_grad, ops::UnsqueezeGradOp,
                  ops::UnsqueezeGradInferShape);
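For reference, the shape rule implemented by GetOutputShape can be exercised in isolation. The following self-contained sketch (UnsqueezeShape is an illustrative stand-in, not a Paddle API) mirrors that logic and reproduces the example from the operator's DOC string:

#include <cstdint>
#include <iostream>
#include <vector>

// Each axis index refers to a position in the *output* tensor, negative axes
// count from the end, and a size-1 entry is inserted at each requested slot.
std::vector<int64_t> UnsqueezeShape(const std::vector<int> &axes,
                                    const std::vector<int64_t> &in_dims) {
  int cur_rank = static_cast<int>(in_dims.size());
  int out_rank = cur_rank + static_cast<int>(axes.size());
  std::vector<int64_t> out(out_rank, 0);  // 0 marks "to be filled from input"

  for (int axis : axes) {
    int cur = axis < 0 ? axis + cur_rank + 1 : axis;
    // Shift previously inserted 1s right to make room, then insert here.
    for (int i = cur_rank; i >= cur; --i) {
      if (out[i] == 1) {
        out[i + 1] = 1;
        out[i] = 0;
      }
    }
    out[cur] = 1;
    ++cur_rank;
  }

  // Fill the remaining slots with the original dimensions, in order.
  for (int in_idx = 0, out_idx = 0; out_idx < out_rank; ++out_idx) {
    if (out[out_idx] == 0) out[out_idx] = in_dims[in_idx++];
  }
  return out;
}

int main() {
  // The example from the operator's DOC string: shape [3, 4, 5], axes = [0, 4].
  auto shape = UnsqueezeShape({0, 4}, {3, 4, 5});
  for (auto d : shape) std::cout << d << ' ';  // prints: 1 3 4 5 1
  std::cout << '\n';
  return 0;
}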
paddle/fluid/pybind/pybind.cc

@@ -66,6 +66,14 @@ bool IsCompiledWithCUDA() {
 #endif
 }

+bool IsCompiledWithDIST() {
+#ifdef PADDLE_WITH_DIST
+  return true;
+#else
+  return false;
+#endif
+}
+
 PYBIND11_PLUGIN(core) {
   py::module m("core", "C++ core of PaddlePaddle");

@@ -508,6 +516,7 @@ All parameter, weight, gradient are variables in Paddle.
        [](bool init_p2p) { framework::InitDevices(init_p2p); });

   m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
+  m.def("is_compiled_with_dist", IsCompiledWithDIST);
 #ifdef PADDLE_WITH_CUDA
   m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
     // Only GPUs with Compute Capability >= 53 support float16
python/CMakeLists.txt

@@ -92,8 +92,15 @@ install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR}
     DESTINATION opt/paddle/share/wheels
 )

-find_program(PATCHELF_EXECUTABLE patchelf)
-if(NOT PATCHELF_EXECUTABLE)
-  message(FATAL_ERROR "patchelf not found, please install it.\n"
-                      "For Ubuntu, the command is: apt-get install -y patchelf.")
-endif()
+if(APPLE)
+  find_program(INSTALL_NAME_TOOL_EXECUTABLE install_name_tool)
+  if(NOT INSTALL_NAME_TOOL_EXECUTABLE)
+    message(FATAL_ERROR "install_name_tool not found, please check.\n")
+  endif()
+else(APPLE)
+  find_program(PATCHELF_EXECUTABLE patchelf)
+  if(NOT PATCHELF_EXECUTABLE)
+    message(FATAL_ERROR "patchelf not found, please install it.\n"
+                        "For Ubuntu, the command is: apt-get install -y patchelf.")
+  endif()
+endif(APPLE)
python/paddle/fluid/__init__.py

@@ -121,6 +121,9 @@ def __bootstrap__():
         'eager_delete_scope', 'use_mkldnn', 'initial_cpu_memory_in_mb',
         'init_allocated_mem'
     ]
+    if core.is_compiled_with_dist():
+        read_env_flags.append('rpc_deadline')
+
     if core.is_compiled_with_cuda():
         read_env_flags += [
             'fraction_of_gpu_memory_to_use', 'cudnn_deterministic'
python/paddle/fluid/tests/unittests/test_unsqueeze_op.py (new file, mode 100644)

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from op_test import OpTest


# Correct: General.
class TestUnsqueezeOp(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": False}


# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (-1, )
        self.new_shape = (3, 5, 1)


# Correct: Mixed input axis.
class TestUnsqueezeOp2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (0, -1)
        self.new_shape = (1, 3, 5, 1)


# Correct: There is duplicated axis.
class TestUnsqueezeOp3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 3, 2, 1, 1, 5)


# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 2, 5)
        self.axes = (3, 1, 1)
        self.new_shape = (3, 1, 1, 2, 5, 1)


# Correct: Inplace.
class TestUnsqueezeOpInplace1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (0, 2)
        self.new_shape = (1, 3, 1, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


# Correct: Inplace. There is mins index.
class TestUnsqueezeOpInplace2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (0, -2)
        self.new_shape = (1, 3, 1, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


# Correct: Inplace. There is duplicated axis.
class TestUnsqueezeOpInplace3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 3, 2, 1, 1, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


if __name__ == "__main__":
    unittest.main()