Commit 971cf705
Authored July 12, 2018 by qiaolongfei

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix-optimizer-accumulator

Parents: d8220ccb, 6ff7f238

Showing 23 changed files with 754 additions and 64 deletions (+754 −64)
paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc   +13 −2
paddle/fluid/framework/details/threaded_ssa_graph_executor.cc         +11 −4
paddle/fluid/framework/details/threaded_ssa_graph_executor.h          +3 −0
paddle/fluid/operators/CMakeLists.txt                                 +2 −0
paddle/fluid/operators/reader/create_batch_reader_op.cc               +5 −5
paddle/fluid/operators/squeeze_op.cc                                  +202 −0
paddle/fluid/operators/unsqueeze_op.cc                                +191 −0
python/CMakeLists.txt                                                 +12 −5
python/paddle/fluid/annotations.py                                    +38 −0
python/paddle/fluid/backward.py                                       +1 −4
python/paddle/fluid/layers/device.py                                  +3 −1
python/paddle/fluid/optimizer.py                                      +12 −12
python/paddle/fluid/tests/book/notest_understand_sentiment.py         +2 −2
python/paddle/fluid/tests/book/test_recognize_digits.py               +10 −8
python/paddle/fluid/tests/book/test_word2vec.py                       +2 −1
python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py  +5 −4
python/paddle/fluid/tests/unittests/test_calc_gradient.py             +0 −2
python/paddle/fluid/tests/unittests/test_get_places_op.py             +2 −1
python/paddle/fluid/tests/unittests/test_layers.py                    +2 −1
python/paddle/fluid/tests/unittests/test_optimizer.py                 +11 −11
python/paddle/fluid/tests/unittests/test_parallel_op.py               +2 −1
python/paddle/fluid/tests/unittests/test_squeeze_op.py                +114 −0
python/paddle/fluid/tests/unittests/test_unsqueeze_op.py              +111 −0
paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc

@@ -13,6 +13,7 @@
 // limitations under the License.
 #include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
+#include <stdexcept>
 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/executor.h"
@@ -53,8 +54,14 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
       }
     }
   }
-  auto fetch_data = underlying_executor_->Run(fetch_tensors);
+  std::vector<framework::LoDTensor> fetch_data;
+  std::exception_ptr eptr;
+  try {
+    fetch_data = underlying_executor_->Run(fetch_tensors);
+  } catch (...) {
+    eptr = std::current_exception();
+  }
   drop_scope_counter_ += 1;
   if (!fetch_tensors.empty() ||
       drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) {
@@ -69,7 +76,11 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
       scope->DeleteScope(local_scope);
     }
   }
-  return fetch_data;
+  if (eptr) {
+    std::rethrow_exception(eptr);
+  } else {
+    return fetch_data;
+  }
 }
 }  // namespace details
 }  // namespace framework
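The change above follows a standard capture-and-rethrow pattern: the executor must still run its scope bookkeeping (counting iterations, deleting local scopes) even when the underlying run throws, so the exception is stashed in a std::exception_ptr and only rethrown after cleanup. A minimal standalone sketch of the same pattern, with placeholder names rather than Paddle APIs:

#include <exception>
#include <iostream>
#include <stdexcept>

// Runs the "work" step, always performs cleanup, then surfaces any
// captured exception with its original type intact.
int RunWithCleanup() {
  std::exception_ptr eptr;
  int result = 0;
  try {
    throw std::runtime_error("worker failed");  // stand-in for Run(...)
  } catch (...) {
    eptr = std::current_exception();  // capture without losing the type
  }
  std::cout << "cleanup always runs\n";  // stand-in for scope deletion
  if (eptr) {
    std::rethrow_exception(eptr);  // rethrow only after cleanup finished
  }
  return result;
}

int main() {
  try {
    RunWithCleanup();
  } catch (const std::runtime_error &e) {
    std::cout << "caught: " << e.what() << "\n";
  }
  return 0;
}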
paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

@@ -78,6 +78,10 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
     set.clear();
   };

+  // Clean run context
+  run_op_futures_.clear();
+  exception_.reset();
+
   // Step 3. Execution
   while (!pending_vars.empty()) {
     // 1. Run All Ready ops
@@ -96,16 +100,19 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
     auto cur_ready_vars = ready_vars.PopAll(1, &timeout);
     if (timeout) {
-      std::lock_guard<std::mutex> l(exception_mu_);
+      std::unique_lock<std::mutex> l(exception_mu_);
       if (exception_) {
+        l.unlock();
+        for (auto &run_op_future : run_op_futures_) {
+          run_op_future.wait();
+        }
+        l.lock();
         std::exception *exp = exception_.get();
         if (dynamic_cast<platform::EOFException *>(exp)) {
           auto e = *static_cast<platform::EOFException *>(exp);
           exception_.reset();
           throw e;
         } else if (dynamic_cast<platform::EnforceNotMet *>(exp)) {
           auto e = *static_cast<platform::EnforceNotMet *>(exp);
           exception_.reset();
           throw e;
         } else {
           LOG(FATAL) << "Unknown exception.";
@@ -222,7 +229,7 @@ void ThreadedSSAGraphExecutor::RunOp(
     }
   };
   if (pool_) {
-    pool_->enqueue(op_run);
+    run_op_futures_.emplace_back(pool_->enqueue(op_run));
   } else {
     op_run();
   }
paddle/fluid/framework/details/threaded_ssa_graph_executor.h

@@ -15,6 +15,7 @@
 #pragma once

 #include <deque>
+#include <list>
 #include <string>
 #include <unordered_set>
 #include <utility>
@@ -77,6 +78,8 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor {
  private:
   ExecutionStrategy strategy_;
+  // use std::list because clear(), push_back, and for_each are O(1)
+  std::list<std::future<void>> run_op_futures_;
 };

 }  // namespace details
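Taken together, the two executor changes make every enqueued op leave a future behind in run_op_futures_, so that when one op records an exception the dispatcher can wait for all in-flight ops to drain before rethrowing. A self-contained sketch of that wait-then-rethrow discipline, using std::async in place of Paddle's thread pool (illustrative names only):

#include <future>
#include <iostream>
#include <list>
#include <stdexcept>

int main() {
  // Keep a future per submitted task, as run_op_futures_ does.
  std::list<std::future<void>> futures;
  for (int i = 0; i < 4; ++i) {
    futures.emplace_back(std::async(std::launch::async, [i] {
      if (i == 2) throw std::runtime_error("op 2 failed");
    }));
  }

  // Drain every task before surfacing the first error, so no worker is
  // still touching shared state when the caller starts unwinding.
  std::exception_ptr first_error;
  for (auto &f : futures) {
    try {
      f.get();  // waits, and rethrows the task's exception if it had one
    } catch (...) {
      if (!first_error) first_error = std::current_exception();
    }
  }

  if (first_error) {
    try {
      std::rethrow_exception(first_error);
    } catch (const std::exception &e) {
      std::cout << "caught after all ops finished: " << e.what() << "\n";
    }
  }
  return 0;
}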
paddle/fluid/operators/CMakeLists.txt

@@ -265,6 +265,8 @@ op_library(recurrent_op DEPS executor)
 op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
 op_library(cos_sim_op DEPS cos_sim_functor)
 op_library(parallel_do_op DEPS executor)
+op_library(unsqueeze_op DEPS reshape_op)
+op_library(squeeze_op DEPS reshape_op)

 if (WITH_GPU)
   op_library(conv_op DEPS vol2col depthwise_conv im2col)
paddle/fluid/operators/reader/create_batch_reader_op.cc

@@ -23,7 +23,7 @@ class BatchReader : public framework::DecoratedReader {
   BatchReader(const std::shared_ptr<ReaderBase>& reader, int batch_size,
               bool discard_leftover)
       : DecoratedReader(reader),
-        batch_size_(batch_size),
+        batch_size_(static_cast<size_t>(batch_size)),
         discard_leftover_(discard_leftover) {
     buffer_.reserve(batch_size_);
   }
@@ -31,7 +31,7 @@ class BatchReader : public framework::DecoratedReader {
   void ReadNextImpl(std::vector<framework::LoDTensor>* out) override;

  private:
-  int batch_size_;
+  size_t batch_size_;
   bool discard_leftover_;
   std::vector<std::vector<framework::LoDTensor>> buffer_;
 };
@@ -78,7 +78,7 @@ class CreateBatchReaderOpMaker : public DecoratedReaderMakerBase {
 void BatchReader::ReadNextImpl(std::vector<framework::LoDTensor>* out) {
   buffer_.clear();
   buffer_.reserve(batch_size_);
-  for (int i = 0; i < batch_size_; ++i) {
+  for (size_t i = 0; i < batch_size_; ++i) {
     buffer_.push_back(std::vector<framework::LoDTensor>());
     reader_->ReadNext(&buffer_.back());
     if (buffer_.back().empty()) {
@@ -95,9 +95,9 @@ void BatchReader::ReadNextImpl(std::vector<framework::LoDTensor>* out) {
     // if buffer_ is empty, the 'out' will return as an empty vector.
     return;
   }
-  int out_num = buffer_[0].size();
+  size_t out_num = buffer_[0].size();
   out->reserve(out_num);
-  for (int j = 0; j < out_num; ++j) {
+  for (size_t j = 0; j < out_num; ++j) {
     // Merge shape and check data type
     std::type_index batch_type = buffer_[0][j].type();
     framework::DDim batch_shape = buffer_[0][j].dims();
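The int to size_t switch in BatchReader removes signed/unsigned comparisons between loop counters and container sizes. A tiny sketch of the pitfall such comparisons invite (plain standard C++, not Paddle code):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 3};
  int i = -1;
  // In a mixed comparison like `i < v.size()`, i is converted to an
  // unsigned type, so -1 becomes a huge value and the test is false:
  std::cout << std::boolalpha
            << (static_cast<std::size_t>(i) < v.size()) << "\n";  // false
  // Matching the counter type to the container's size_type avoids the
  // conversion entirely:
  for (std::size_t j = 0; j < v.size(); ++j) {
    std::cout << v[j] << " ";
  }
  std::cout << "\n";
  return 0;
}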
paddle/fluid/operators/squeeze_op.cc (new file, 0 → 100644)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

class SqueezeOpInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SqueezeOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SqueezeOp should not be null.");

    const auto &x_dims = ctx->GetInputDim("X");
    // Check input tensor dims (<6) Eigen limit.
    PADDLE_ENFORCE(x_dims.size() <= 6,
                   "Invalid dimensions, the rank of Input(X) "
                   "should be in the range of [1, 6] (Eigen limit).");

    const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
    for (int a : axes) {
      PADDLE_ENFORCE_LT(a, x_dims.size(),
                        "The squeeze axis should be less than input "
                        "tensor's rank.");
    }

    auto out_dims = GetOutputShape(axes, x_dims);
    ctx->SetOutputDim("Out", out_dims);
    if (x_dims[0] == out_dims[0]) {
      // Only pass LoD when the first dimension of output and Input(X)
      // are the same.
      ctx->ShareLoD("X", "Out");
    }
  }

  static framework::DDim GetOutputShape(const std::vector<int> squeeze_dims,
                                        const framework::DDim &in_dims) {
    size_t num_squeeze_dims = squeeze_dims.size();
    int cnt_squeezed_dims = 0;
    bool should_squeeze[9] = {false};

    // Determines number of dimensions of output tensor after squeeze.
    // Mark and count the dimensions need to be squeezed
    if (num_squeeze_dims == 0) {
      for (int idx = 0; idx < in_dims.size(); ++idx) {
        if (in_dims[idx] == 1) {
          should_squeeze[idx] = true;
          ++cnt_squeezed_dims;
        }
      }
    } else {
      for (size_t idx = 0; idx < num_squeeze_dims; ++idx) {
        int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + in_dims.size()
                                            : squeeze_dims[idx];
        // Check current index, the upper limit has been checked in line 36.
        PADDLE_ENFORCE(current >= 0,
                       "Invalid axis, the negative axis is out of range.");
        PADDLE_ENFORCE(in_dims[current] == 1,
                       "Invalid axis index, the axis that will be squeezed "
                       "should be equal to 1.");

        if (!(should_squeeze[current])) {
          ++cnt_squeezed_dims;
        }
        should_squeeze[current] = true;
      }
    }

    // Make output dimensions
    std::vector<int64_t> output_shape(in_dims.size() - cnt_squeezed_dims, 0);
    for (int in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) {
      if (!should_squeeze[in_idx]) {
        output_shape[out_idx++] = in_dims[in_idx];
      }
    }

    return framework::make_ddim(output_shape);
  }
};

class SqueezeOp : public framework::OperatorBase {
 public:
  using OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto &axes = Attr<std::vector<int>>("axes");
    auto x_dims = scope.FindVar(Input("X"))->Get<framework::LoDTensor>().dims();
    auto out_dims = SqueezeOpInferShape::GetOutputShape(axes, x_dims);

    framework::AttributeMap attrs;
    attrs["shape"] = framework::vectorize2int(out_dims);
    attrs["inplace"] = Attr<bool>("inplace");
    // Invoke Reshape Op
    auto reshape_op = framework::OpRegistry::CreateOp(
        "reshape", {{"X", {Input("X")}}, {"Shape", {}}},
        {{"Out", {Output("Out")}}}, attrs);
    reshape_op->Run(scope, place);
  }
};

class SqueezeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor). The input tensor of squeeze operator.");
    AddOutput("Out", "(Tensor). The output tensor of squeeze operator.");
    AddAttr<std::vector<int>>("axes",
                              "(std::vector<int>). List of integers,"
                              " indicating the dimensions to squeeze.")
        .SetDefault({});
    AddAttr<bool>("inplace",
                  "(default: false) Squeeze the source tensor's shape without "
                  "memory copy. When Attr(inplace) is set true, the output "
                  "tensor shares memory with Input(X), otherwise, a new output "
                  "tensor is created, and its data are copied from Input(x).")
        .SetDefault(false);
    AddComment(R"DOC(
Squeeze Operator.

Remove single-dimensional entries from the shape of a tensor.
Takes a parameter axes with a list of axes to squeeze.
If axes is not provided, all the single dimensions will be removed from the shape.
If an axis is selected with shape entry not equal to one, an error is raised.

Examples:
Case 1:
  Given
    X.shape = (1, 3, 1, 5)
  and
    axes = [0]
  we get:
    Out.shape = (3, 1, 5)

Case 2:
  Given
    X.shape = (1, 3, 1, 5)
  and
    axes = []
  we get:
    Out.shape = (3, 5)
)DOC");
  }
};

class SqueezeGradInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {
    context->SetOutputDim(framework::GradVarName("X"),
                          context->GetInputDim("X"));
    context->ShareLoD("X", framework::GradVarName("X"));
  }
};

class SqueezeGradOp : public framework::OperatorBase {
 public:
  using OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto dx_name = Output(framework::GradVarName("X"));
    auto dout_name = Input(framework::GradVarName("Out"));
    auto x_dims = scope.FindVar(Input("X"))->Get<framework::LoDTensor>().dims();
    framework::AttributeMap attrs;
    attrs["shape"] = framework::vectorize2int(x_dims);
    attrs["inplace"] = Attr<bool>("inplace");

    auto reshape_op = framework::OpRegistry::CreateOp(
        "reshape", {{"X", {dout_name}}, {"Shape", {}}}, {{"Out", {dx_name}}},
        attrs);
    reshape_op->Run(scope, place);
  }
};

}  // namespace operators
}  // namespace paddle

// Tell linker to use reshape op
USE_OP(reshape);

namespace ops = paddle::operators;
REGISTER_OPERATOR(squeeze, ops::SqueezeOp, ops::SqueezeOpMaker,
                  ops::SqueezeOpInferShape,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(squeeze_grad, ops::SqueezeGradOp, ops::SqueezeGradInferShape);
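The heart of the new operator is GetOutputShape, which marks the size-1 axes to drop (all of them when axes is empty, with negative indices counted from the back) and copies the surviving dimensions. A standalone sketch of the same shape rule on plain vectors, assuming valid inputs (not the Paddle implementation itself):

#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors the logic of SqueezeOpInferShape::GetOutputShape: drop the
// listed size-1 axes, or every size-1 axis when `axes` is empty.
std::vector<int64_t> SqueezeShape(const std::vector<int64_t> &in,
                                  const std::vector<int> &axes) {
  std::vector<bool> drop(in.size(), false);
  if (axes.empty()) {
    for (size_t i = 0; i < in.size(); ++i) drop[i] = (in[i] == 1);
  } else {
    for (int a : axes) {
      int cur = a < 0 ? a + static_cast<int>(in.size()) : a;  // negatives ok
      drop[cur] = true;  // assumes in[cur] == 1, as the op enforces
    }
  }
  std::vector<int64_t> out;
  for (size_t i = 0; i < in.size(); ++i) {
    if (!drop[i]) out.push_back(in[i]);
  }
  return out;
}

int main() {
  for (int64_t d : SqueezeShape({1, 3, 1, 5}, {0, 2})) std::cout << d << " ";
  std::cout << "\n";  // prints: 3 5
  for (int64_t d : SqueezeShape({1, 3, 1, 5}, {})) std::cout << d << " ";
  std::cout << "\n";  // prints: 3 5 (all size-1 axes removed)
  return 0;
}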
paddle/fluid/operators/unsqueeze_op.cc (new file, 0 → 100644)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

class UnsqueezeOpInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of UnsqueezeOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of UnsqueezeOp should not be null.");

    const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
    const auto &x_dims = ctx->GetInputDim("X");
    // Validity Check: input tensor dims (<6).
    PADDLE_ENFORCE(x_dims.size() <= 6,
                   "Invalid dimensions, the rank of Input(X) "
                   "should be in the range of [1, 6] (Eigen limit)");

    auto out_dims = GetOutputShape(axes, x_dims);
    ctx->SetOutputDim("Out", out_dims);
    if (x_dims[0] == out_dims[0]) {
      // Only pass LoD when the first dimension of output and Input(X)
      // are the same.
      ctx->ShareLoD("X", "Out");
    }
  }

  static framework::DDim GetOutputShape(const std::vector<int> unsqz_dims,
                                        const framework::DDim &in_dims) {
    int output_size = in_dims.size() + static_cast<int>(unsqz_dims.size());
    int cur_output_size = in_dims.size();
    std::vector<int64_t> output_shape(output_size, 0);

    // Validity Check: rank range.
    PADDLE_ENFORCE(output_size <= 6,
                   "The output tensor's rank should be less than 6.");

    for (int axis : unsqz_dims) {
      int cur = axis < 0 ? axis + cur_output_size + 1 : axis;
      // Validity Check: the axis bound
      PADDLE_ENFORCE(
          cur >= 0 && cur <= cur_output_size,
          "The unsqueeze dims must be within range of current rank.");
      // Move old axis, and insert new axis
      for (int i = cur_output_size; i >= cur; --i) {
        if (output_shape[i] == 1) {
          // Move axis
          output_shape[i + 1] = 1;
          output_shape[i] = 0;
        }
      }
      output_shape[cur] = 1;
      // Add the output size.
      cur_output_size++;
    }

    // Make output shape
    for (int in_idx = 0, out_idx = 0; out_idx < output_size; ++out_idx) {
      if (output_shape[out_idx] == 0) {
        output_shape[out_idx] = in_dims[in_idx++];
      }
    }

    return framework::make_ddim(output_shape);
  }
};

class UnsqueezeOp : public framework::OperatorBase {
 public:
  using OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto &axes = Attr<std::vector<int>>("axes");
    auto x_dims = scope.FindVar(Input("X"))->Get<framework::LoDTensor>().dims();
    auto out_dims = UnsqueezeOpInferShape::GetOutputShape(axes, x_dims);

    framework::AttributeMap attrs;
    attrs["shape"] = framework::vectorize2int(out_dims);
    attrs["inplace"] = Attr<bool>("inplace");
    // Invoke Reshape op.
    auto reshape_op = framework::OpRegistry::CreateOp(
        "reshape", {{"X", {Input("X")}}, {"Shape", {}}},
        {{"Out", {Output("Out")}}}, attrs);
    reshape_op->Run(scope, place);
  }
};

class UnsqueezeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor). The input tensor of unsqueeze operator.");
    AddOutput("Out", "(Tensor). The output tensor of unsqueeze operator.");
    AddAttr<std::vector<int>>("axes",
                              "(std::vector<int>). List of integers,"
                              " indicating the dimensions to be inserted")
        .AddCustomChecker([](const std::vector<int> &axes) {
          PADDLE_ENFORCE(!axes.empty(),
                         "Invalid axes, The unsqueeze axes is empty.");
          // Validity Check: axes dims (<6).
          PADDLE_ENFORCE(static_cast<int>(axes.size()) < 6,
                         "Invalid dimensions, dynamic dimensions should be "
                         "within [1, 6] dimensions (Eigen limit).");
          // Validity Check: the range of unsqueeze axis.
          for (int axis : axes) {
            PADDLE_ENFORCE(axis < 6,
                           "Invalid dimensions, input axis should be"
                           " within [1, 6] dimensions (Eigen limit).");
          }
        });
    AddAttr<bool>("inplace",
                  "(default: false) Unsqueeze the source tensor's shape without "
                  "memory copy. When Attr(inplace) is set true, the output "
                  "tensor shares memory with Input(X), otherwise, a new output "
                  "tensor is created, and its data are copied from Input(x).")
        .SetDefault(false);
    AddComment(R"DOC(
Unsqueeze Operator.

Insert single-dimensional entries to the shape of a tensor.
Takes one required argument axes, a list of dimensions that will be inserted.
Dimension indices in axes are as seen in the output tensor.

For example:
  Given a tensor with shape [3, 4, 5],
  then Unsqueeze(tensor, axes=[0, 4]) has shape [1, 3, 4, 5, 1]
)DOC");
  }
};

class UnsqueezeGradInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *ctx) const override {
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
    ctx->ShareLoD("X", framework::GradVarName("X"));
  }
};

class UnsqueezeGradOp : public framework::OperatorBase {
 public:
  using OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto dx_name = Output(framework::GradVarName("X"));
    auto dout_name = Input(framework::GradVarName("Out"));
    auto x_dims = scope.FindVar(Input("X"))->Get<framework::LoDTensor>().dims();
    framework::AttributeMap attrs;
    attrs["shape"] = framework::vectorize2int(x_dims);
    attrs["inplace"] = Attr<bool>("inplace");

    auto reshape_op = framework::OpRegistry::CreateOp(
        "reshape", {{"X", {dout_name}}, {"Shape", {}}}, {{"Out", {dx_name}}},
        attrs);
    reshape_op->Run(scope, place);
  }
};

}  // namespace operators
}  // namespace paddle

// Tell linker to use reshape op.
USE_OP(reshape);

namespace ops = paddle::operators;
REGISTER_OPERATOR(unsqueeze, ops::UnsqueezeOp, ops::UnsqueezeOpMaker,
                  ops::UnsqueezeOpInferShape,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(unsqueeze_grad, ops::UnsqueezeGradOp,
                  ops::UnsqueezeGradInferShape);
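Unsqueeze's shape rule is subtler: axes index positions in the output shape, duplicates are allowed, and each insertion shifts previously inserted size-1 axes one slot to the right before claiming its own slot. A standalone sketch mirroring that insert-and-shift logic (illustrative only, assuming valid axes):

#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors UnsqueezeOpInferShape::GetOutputShape: axes refer to positions
// in the *output* shape, so the working rank grows by one per insertion.
std::vector<int64_t> UnsqueezeShape(const std::vector<int64_t> &in,
                                    const std::vector<int> &axes) {
  int out_rank = static_cast<int>(in.size() + axes.size());
  std::vector<bool> is_new(out_rank, false);
  int cur_rank = static_cast<int>(in.size());
  for (int axis : axes) {
    int cur = axis < 0 ? axis + cur_rank + 1 : axis;
    // Shift previously inserted axes at or after `cur` one slot right.
    for (int i = cur_rank; i >= cur; --i) {
      if (is_new[i]) {
        is_new[i + 1] = true;
        is_new[i] = false;
      }
    }
    is_new[cur] = true;
    ++cur_rank;
  }
  // Inserted slots become 1; the rest take the input dims in order.
  std::vector<int64_t> out(out_rank);
  for (int in_idx = 0, out_idx = 0; out_idx < out_rank; ++out_idx) {
    out[out_idx] = is_new[out_idx] ? 1 : in[in_idx++];
  }
  return out;
}

int main() {
  for (int64_t d : UnsqueezeShape({3, 4, 5}, {0, 4})) std::cout << d << " ";
  std::cout << "\n";  // prints: 1 3 4 5 1, matching the operator's DOC
  return 0;
}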
python/CMakeLists.txt

@@ -92,8 +92,15 @@ install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR}
         DESTINATION opt/paddle/share/wheels
 )

-find_program(PATCHELF_EXECUTABLE patchelf)
-if(NOT PATCHELF_EXECUTABLE)
-  message(FATAL_ERROR "patchelf not found, please install it.\n"
-                      "For Ubuntu, the command is: apt-get install -y patchelf.")
-endif()
+if(APPLE)
+  find_program(INSTALL_NAME_TOOL_EXECUTABLE install_name_tool)
+  if(NOT INSTALL_NAME_TOOL_EXECUTABLE)
+    message(FATAL_ERROR "install_name_tool not found, please check.\n")
+  endif()
+else(APPLE)
+  find_program(PATCHELF_EXECUTABLE patchelf)
+  if(NOT PATCHELF_EXECUTABLE)
+    message(FATAL_ERROR "patchelf not found, please install it.\n"
+                        "For Ubuntu, the command is: apt-get install -y patchelf.")
+  endif()
+endif(APPLE)
python/paddle/fluid/annotations.py (new file, 0 → 100644)

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import sys

__all__ = ['deprecated']


def deprecated(since, instead, extra_message=""):
    def decorator(func):
        err_msg = "API {0} is deprecated since {1}. Please use {2} instead.".format(
            func.__name__, since, instead)
        if len(extra_message) != 0:
            err_msg += "\n"
            err_msg += extra_message

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print >> sys.stderr, err_msg
            return func(*args, **kwargs)

        wrapper.__doc__ += "\n"
        wrapper.__doc__ += err_msg
        return wrapper

    return decorator
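The decorator wraps the target function so every call first prints the deprecation notice to stderr and then forwards its arguments unchanged (the print >> sys.stderr form is Python 2 syntax, current for Paddle at the time). For comparison, a rough C++ analogue of the same wrap-and-forward technique; the names here are invented for illustration, not Paddle APIs:

#include <cstdio>
#include <utility>

// Illustrative sketch: wrap any callable so each invocation first emits a
// deprecation notice on stderr, then forwards its arguments unchanged.
template <typename F>
auto Deprecated(const char *msg, F func) {
  return [msg, func](auto &&... args) {
    std::fprintf(stderr, "%s\n", msg);
    return func(std::forward<decltype(args)>(args)...);
  };
}

// Hypothetical stand-in for a deprecated API, not a Paddle function.
static int GetPlaces(int device_count) { return device_count; }

int main() {
  auto old_get_places = Deprecated(
      "API GetPlaces is deprecated since 0.15.0. Please use ParallelExecutor "
      "instead.",
      GetPlaces);
  return old_get_places(4) == 4 ? 0 : 1;
}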
python/paddle/fluid/backward.py

@@ -18,10 +18,7 @@ import collections
 import copy
 import unique_name

-__all__ = [
-    'append_backward',
-    'calc_gradient',
-]
+__all__ = ['append_backward']


 def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
python/paddle/fluid/layers/device.py

@@ -18,10 +18,12 @@ All util layers.
 from layer_function_generator import autodoc
 from ..framework import unique_name
 from ..layer_helper import LayerHelper
+from ..annotations import deprecated

-__all__ = ['get_places']
+__all__ = []


+@deprecated(since='0.15.0', instead="ParallelExecutor")
 @autodoc()
 def get_places(device_count=None, device_type=None):
     helper = LayerHelper('get_places', **locals())
python/paddle/fluid/optimizer.py

@@ -29,7 +29,7 @@ __all__ = [
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
     'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
     'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer',
-    'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'Optimizer', 'RMSPropOptimizer'
+    'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'RMSPropOptimizer'
 ]
@@ -67,7 +67,7 @@ class Optimizer(object):
         self._LARS_weight_decay = LARS_weight_decay

     def _create_global_learning_rate(self):
-        lr = self.global_learning_rate()
+        lr = self._global_learning_rate()
         if isinstance(lr, framework.Variable):
             return
@@ -86,7 +86,7 @@ class Optimizer(object):
             dtype='float32' if self._dtype == None else self._dtype,
             persistable=True)

-    def global_learning_rate(self, program=None):
+    def _global_learning_rate(self, program=None):
         """
         get global decayed learning rate
         :return:
@@ -110,9 +110,9 @@ class Optimizer(object):
             return param_lr
         else:
             if param_lr == 1.0:
-                return self.global_learning_rate()
+                return self._global_learning_rate()
             else:
-                return self.global_learning_rate() * param_lr
+                return self._global_learning_rate() * param_lr

     def _create_accumulators(self, block, parameters):
         """Create all accumulators needed by the parameters
@@ -185,10 +185,10 @@ class Optimizer(object):
                 format(name, param.name))
         return self._accumulators[name][param.name]

-    def create_optimization_pass(self,
-                                 parameters_and_grads,
-                                 loss,
-                                 startup_program=None):
+    def _create_optimization_pass(self,
+                                  parameters_and_grads,
+                                  loss,
+                                  startup_program=None):
         """Add optimization operators to update gradients to variables.

         Args:
@@ -221,7 +221,7 @@ class Optimizer(object):
         self._create_global_learning_rate()
         if self._LARS_weight_decay > 0.0:
             layers.append_LARS(parameters_and_grads,
-                               self.global_learning_rate(),
+                               self._global_learning_rate(),
                                self._LARS_weight_decay)

         optimize_ops = []
@@ -263,8 +263,8 @@ class Optimizer(object):
         params_grads = append_regularization_ops(params_grads,
                                                  self.regularization)
-        optimize_ops = self.create_optimization_pass(params_grads, loss,
-                                                     startup_program)
+        optimize_ops = self._create_optimization_pass(params_grads, loss,
+                                                      startup_program)
         return optimize_ops, params_grads
python/paddle/fluid/tests/book/notest_understand_sentiment.py

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import print_function
+from paddle.fluid.layers.device import get_places
 import unittest
 import paddle.fluid as fluid
 import paddle
@@ -144,7 +144,7 @@ def train(word_dict,
         cost, acc_out, prediction = net_method(
             data, label, input_dim=dict_dim, class_dim=class_dim)
     else:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             cost, acc, _ = net_method(
python/paddle/fluid/tests/book/test_recognize_digits.py

@@ -12,15 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import print_function
 import argparse
-import paddle.fluid as fluid
-import paddle
-import sys
-import numpy
-import unittest
 import math
+import os
 import sys
+import unittest
+import numpy
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places

 BATCH_SIZE = 64
@@ -76,7 +78,7 @@ def train(nn_type,
         net_conf = conv_net

     if parallel:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             img_ = pd.read_input(img)
python/paddle/fluid/tests/book/test_word2vec.py

@@ -14,6 +14,7 @@
 import paddle
 import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 import unittest
 import os
 import numpy as np
@@ -80,7 +81,7 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True):
         avg_cost, predict_word = __network__(
             [first_word, second_word, third_word, forth_word, next_word])
     else:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             avg_cost, predict_word = __network__(
python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py

@@ -12,12 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import numpy as np
-import paddle
-import paddle.fluid as fluid
 import math
 import sys
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places

 # need to fix random seed and training data to compare the loss
 # value accurately calculated by the default and the memory optimization
 # version.
@@ -34,7 +35,7 @@ if fluid.core.is_compiled_with_cuda():
         use_nccl = False
     place = fluid.CUDAPlace(0)

-    places = fluid.layers.get_places(device_count=0, device_type=device_type)
+    places = get_places(device_count=0, device_type=device_type)
     pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
     with pd.do():
         x_ = pd.read_input(x)
python/paddle/fluid/tests/unittests/test_calc_gradient.py

@@ -16,8 +16,6 @@ import unittest
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
-import paddle.fluid.framework as framework
-import paddle.fluid.optimizer as optimizer
 from paddle.fluid.backward import calc_gradient
python/paddle/fluid/tests/unittests/test_get_places_op.py

@@ -13,6 +13,7 @@
 # limitations under the License.
 import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 import decorators
 import unittest
@@ -20,7 +21,7 @@ import unittest
 class TestGetPlaces(unittest.TestCase):
     @decorators.prog_scope()
     def test_get_places(self):
-        places = fluid.layers.get_places()
+        places = get_places()
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
         exe.run(fluid.default_main_program())
python/paddle/fluid/tests/unittests/test_layers.py

@@ -16,6 +16,7 @@ from __future__ import print_function
 import unittest
 import paddle.fluid.layers as layers
+from paddle.fluid.layers.device import get_places
 import paddle.fluid.nets as nets
 from paddle.fluid.framework import Program, program_guard, default_main_program
 from paddle.fluid.param_attr import ParamAttr
@@ -238,7 +239,7 @@ class TestBook(unittest.TestCase):
     def test_get_places(self):
         program = Program()
         with program_guard(program):
-            x = layers.get_places(device_count=4)
+            x = get_places(device_count=4)
             self.assertIsNotNone(x)
         print(str(program))
python/paddle/fluid/tests/unittests/test_optimizer.py

@@ -97,7 +97,7 @@ class TestMomentumOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
-        opts = momentum_optimizer.create_optimization_pass(
+        opts = momentum_optimizer._create_optimization_pass(
             params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         sgd_op = opts[-1]
@@ -151,7 +151,7 @@ class TestMomentumOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
-        opts = momentum_optimizer.create_optimization_pass(
+        opts = momentum_optimizer._create_optimization_pass(
             params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         sgd_op = opts[-1]
@@ -214,8 +214,8 @@ class TestAdagradOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
-        opts = adagrad_optimizer.create_optimization_pass(
-            params_grads, mul_out, init_program)
+        opts = adagrad_optimizer._create_optimization_pass(
+            params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         self.assertEqual([op.type for op in opts],
                          ["fill_constant", "elementwise_mul", "adagrad"])
@@ -278,8 +278,8 @@ class TestAdamOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
-        opts = adam_optimizer.create_optimization_pass(
-            params_grads, mul_out, init_program)
+        opts = adam_optimizer._create_optimization_pass(
+            params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 5)
         self.assertEqual(
             [op.type for op in opts],
@@ -345,8 +345,8 @@ class TestAdamaxOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
-        opts = adamax_optimizer.create_optimization_pass(
-            params_grads, mul_out, init_program)
+        opts = adamax_optimizer._create_optimization_pass(
+            params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 4)
         self.assertEqual(
             [op.type for op in opts],
@@ -409,7 +409,7 @@ class TestDecayedAdagradOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)
-        opts = decayed_adagrad_optimizer.create_optimization_pass(
+        opts = decayed_adagrad_optimizer._create_optimization_pass(
             params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         self.assertEqual(
@@ -475,8 +475,8 @@ class TestFtrlOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(ftrl_optimizer.get_accumulators()), 0)
-        opts = ftrl_optimizer.create_optimization_pass(
-            params_grads, mul_out, init_program)
+        opts = ftrl_optimizer._create_optimization_pass(
+            params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         self.assertEqual([op.type for op in opts],
                          ["fill_constant", "elementwise_mul", "ftrl"])
python/paddle/fluid/tests/unittests/test_parallel_op.py

@@ -15,6 +15,7 @@
 import unittest

 import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 import paddle.fluid.profiler as profiler
 import numpy
@@ -115,7 +116,7 @@ class BaseParallelForTest(unittest.TestCase):
         if use_parallel:
             thread_num = fluid.core.get_cuda_device_count(
             ) if use_gpu else 8
-            places = fluid.layers.get_places(thread_num)
+            places = get_places(thread_num)
             pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
             data = next(generator)
python/paddle/fluid/tests/unittests/test_squeeze_op.py (new file, 0 → 100644)

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from op_test import OpTest


# Correct: General.
class TestSqueezeOp(OpTest):
    def setUp(self):
        self.op_type = "squeeze"
        self.init_test_case()
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 5)
        self.axes = (0, 2)
        self.new_shape = (3, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": False}


# Correct: There is minus axis.
class TestSqueezeOp1(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 5)
        self.axes = (0, -2)
        self.new_shape = (3, 5)


# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 5)
        self.axes = ()
        self.new_shape = (3, 5)


# Correct: Just part of axes be squeezed.
class TestSqueezeOp3(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 1, 5, 1, 4, 1)
        self.axes = (1, -1)
        self.new_shape = (3, 5, 1, 4)


# Correct: Inplace.
class TestSqueezeOpInplace1(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 5)
        self.axes = (0, 2)
        self.new_shape = (3, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


# Correct: Inplace. There is minus axis.
class TestSqueezeOpInplace2(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 5)
        self.axes = (0, -2)
        self.new_shape = (3, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


# Correct: Inplace. No axes input.
class TestSqueezeOpInplace3(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 5)
        self.axes = ()
        self.new_shape = (3, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


# Correct: Inplace. Just part of axes be squeezed.
class TestSqueezeOpInplace4(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 1, 5, 1, 4, 1)
        self.axes = (1, -1)
        self.new_shape = (3, 5, 1, 4)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/test_unsqueeze_op.py (new file, 0 → 100644)

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from op_test import OpTest


# Correct: General.
class TestUnsqueezeOp(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": False}


# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (-1, )
        self.new_shape = (3, 5, 1)


# Correct: Mixed input axis.
class TestUnsqueezeOp2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (0, -1)
        self.new_shape = (1, 3, 5, 1)


# Correct: There is duplicated axis.
class TestUnsqueezeOp3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 3, 2, 1, 1, 5)


# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 2, 5)
        self.axes = (3, 1, 1)
        self.new_shape = (3, 1, 1, 2, 5, 1)


# Correct: Inplace.
class TestUnsqueezeOpInplace1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (0, 2)
        self.new_shape = (1, 3, 1, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


# Correct: Inplace. There is minus index.
class TestUnsqueezeOpInplace2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 5)
        self.axes = (0, -2)
        self.new_shape = (1, 3, 1, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


# Correct: Inplace. There is duplicated axis.
class TestUnsqueezeOpInplace3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (3, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 3, 2, 1, 1, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes, "inplace": True}


if __name__ == "__main__":
    unittest.main()