Commit 22462007 (unverified)

add new flatten op test=develop (#25393)

Authored Aug 06, 2020 by yaoxuefeng; committed via GitHub Aug 06, 2020. Parent: 20c8432a
Showing 12 changed files with 741 additions and 48 deletions (+741, -48):
paddle/fluid/operators/flatten_op.cc (+183, -0)
paddle/fluid/operators/flatten_op.cu.cc (+23, -0)
paddle/fluid/operators/flatten_op.h (+68, -0)
python/paddle/fluid/dygraph/nn.py (+60, -1)
python/paddle/fluid/layers/detection.py (+46, -46)
python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py (+204, -0)
python/paddle/fluid/tests/unittests/test_layers.py (+45, -0)
python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py (+1, -0)
python/paddle/nn/__init__.py (+1, -0)
python/paddle/nn/layer/__init__.py (+1, -0)
python/paddle/nn/layer/common.py (+1, -0)
python/paddle/tensor/manipulation.py (+108, -1)
paddle/fluid/operators/flatten_op.cc
@@ -241,6 +241,156 @@ class Flatten2GradOp : public framework::OperatorWithKernel {
  }
};
class FlattenContiguousRangeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "FlattenContiguousRange");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
                   "FlattenContiguousRange");
    const auto &start_axis = ctx->Attrs().Get<int>("start_axis");
    const auto &stop_axis = ctx->Attrs().Get<int>("stop_axis");
    const auto &in_dims = ctx->GetInputDim("X");
    int in_dims_size = in_dims.size();
    int real_start_axis = start_axis, real_stop_axis = stop_axis;
    if (start_axis < 0) {
      real_start_axis = start_axis + in_dims_size;
    }
    if (stop_axis < 0) {
      real_stop_axis = stop_axis + in_dims_size;
    }
    PADDLE_ENFORCE_GE(real_stop_axis, real_start_axis,
                      platform::errors::InvalidArgument(
                          "The stop_axis should be greater "
                          "than or equal to start_axis."));

    const auto &out_dims =
        GetOutputShape(real_start_axis, real_stop_axis, in_dims);
    ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
    if (in_dims[0] == out_dims[0]) {
      // Only pass LoD when the first dimension of output and Input(X)
      // are the same.
      ctx->ShareLoD("X", "Out");
    }

    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Flatten2");
    std::vector<int64_t> xshape_dims(in_dims.size() + 1);
    xshape_dims[0] = 0;
    for (int i = 0; i < in_dims.size(); ++i) {
      xshape_dims[i + 1] = in_dims[i];
    }
    ctx->SetOutputDim("XShape", framework::make_ddim(xshape_dims));
    ctx->ShareLoD("X", "XShape");
  }

  static std::vector<int32_t> GetOutputShape(const int start_axis,
                                             const int stop_axis,
                                             const framework::DDim &in_dims) {
    int64_t outer = 1;
    std::vector<int32_t> out_shape;
    int in_dims_size = in_dims.size();
    out_shape.reserve(in_dims_size - stop_axis + start_axis);
    for (int i = 0; i < start_axis; ++i) {
      out_shape.push_back(in_dims[i]);
    }
    for (int i = start_axis; i <= stop_axis; i++) {
      outer *= in_dims[i];
    }
    out_shape.push_back(outer);
    for (int i = stop_axis + 1; i < in_dims_size; i++) {
      out_shape.push_back(in_dims[i]);
    }
    return out_shape;
  }
};
class FlattenContiguousRangeOpMaker : public FlattenOpMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor) A tensor of rank >= axis.");
    AddOutput("Out",
              "A 2D tensor is reshaped input tensor. The input dimensions"
              "up to axis are flattened to the outer dimension of the output"
              "and the remaining input dimensions are flattened into the inner"
              "dimension of the output.");
    AddAttr<int>("start_axis",
                 "(int)"
                 "Indicate the input start dimension (exclusive) to flatten")
        .SetDefault(1);
    AddAttr<int>("stop_axis",
                 "(int)"
                 "Indicate the input stop dimension (exclusive) to flatten")
        .SetDefault(1);
    AddComment(R"DOC(
Flatten Operator

Flattens the input tensor into a new matrix according to start_axis and stop_axis.

Examples:
Case 1:
  Given
    X.shape = (3, 100, 100, 4)
  and
    start_axis = 2, stop_axis = -1
  We get:
    Out.shape = (3, 100, 400)

Case 2:
  Given
    X.shape = (3, 100, 100, 4)
  and
    start_axis = 0, stop_axis = -1
  We get:
    Out.shape = (3 * 100 * 100 * 4)
)DOC");
    AddOutput("XShape",
              "XShape is just used to store the shape and lod of X, which will "
              "be used in FlattenGradOp.")
        .AsIntermediate();
  }
};
template <typename T>
class FlattenContiguousRangeGradOpMaker
    : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("flatten_contiguous_range_grad");
    grad_op->SetInput("XShape", this->Output("XShape"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    grad_op->SetAttrMap(this->Attrs());
  }
};
class FlattenContiguousRangeGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *context) const override {
    OP_INOUT_CHECK(context->HasInput("XShape"), "Input", "XShape",
                   "FlattenContiguousRangeGrad");
    OP_INOUT_CHECK(context->HasInput(framework::GradVarName("Out")), "Input",
                   framework::GradVarName("Out"),
                   "FlattenContiguousRangeGrad");
    auto xshape_dims = context->GetInputDim("XShape");
    auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
    context->SetOutputDim(framework::GradVarName("X"), x_dims);
    context->ShareLoD("XShape", framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
                                       ctx, framework::GradVarName("Out")),
                                   ctx.device_context());
  }
};
DECLARE_INPLACE_OP_INFERER(FlattenOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(FlattenGradInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
...
@@ -266,6 +416,16 @@ REGISTER_OPERATOR(flatten2, ops::Flatten2Op, ops::Flatten2OpMaker,
REGISTER_OPERATOR(flatten2_grad, ops::Flatten2GradOp,
                  ops::FlattenGradInplaceInferer);
REGISTER_OPERATOR(
    flatten_contiguous_range, ops::FlattenContiguousRangeOp,
    ops::FlattenContiguousRangeOpMaker,
    ops::FlattenContiguousRangeGradOpMaker<paddle::framework::OpDesc>,
    ops::FlattenContiguousRangeGradOpMaker<paddle::imperative::OpBase>,
    ops::FlattenOpInplaceInferer);
REGISTER_OPERATOR(flatten_contiguous_range_grad,
                  ops::FlattenContiguousRangeGradOp,
                  ops::FlattenGradInplaceInferer);
REGISTER_OP_CPU_KERNEL(
    flatten, ops::FlattenKernel<paddle::platform::CPUDeviceContext, float>,
    ops::FlattenKernel<paddle::platform::CPUDeviceContext, double>,
...
@@ -292,3 +452,26 @@ REGISTER_OP_CPU_KERNEL(
    ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int>,
    ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int8_t>,
    ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int64_t>);
REGISTER_OP_CPU_KERNEL(
    flatten_contiguous_range,
    ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
                                      float>,
    ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
                                      double>,
    ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext, int>,
    ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
                                      int8_t>,
    ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
                                      int64_t>);
REGISTER_OP_CPU_KERNEL(
    flatten_contiguous_range_grad,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
                                          float>,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
                                          double>,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
                                          int>,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
                                          int8_t>,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
                                          int64_t>);
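To make the shape rule concrete, here is a minimal Python sketch (an illustration, not part of the patch) of what GetOutputShape computes: axes before start_axis and after stop_axis are kept, and the closed range [start_axis, stop_axis] collapses into a single dimension.

    def get_output_shape(in_shape, start_axis, stop_axis):
        # Normalize negative axes against the rank, as InferShape does.
        rank = len(in_shape)
        start = start_axis + rank if start_axis < 0 else start_axis
        stop = stop_axis + rank if stop_axis < 0 else stop_axis
        outer = 1
        for d in in_shape[start:stop + 1]:
            outer *= d  # collapse the closed range [start, stop] into one dim
        return list(in_shape[:start]) + [outer] + list(in_shape[stop + 1:])

    # The two cases from the operator's DOC comment:
    assert get_output_shape((3, 100, 100, 4), 2, -1) == [3, 100, 400]
    assert get_output_shape((3, 100, 100, 4), 0, -1) == [3 * 100 * 100 * 4]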
paddle/fluid/operators/flatten_op.cu.cc
@@ -42,3 +42,26 @@ REGISTER_OP_CUDA_KERNEL(
    ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int>,
    ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int8_t>,
    ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
    flatten_contiguous_range,
    ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                      float>,
    ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                      double>,
    ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                      int>,
    ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                      int8_t>,
    ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                      int64_t>);
REGISTER_OP_CUDA_KERNEL(
    flatten_contiguous_range_grad,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                          float>,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                          double>,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                          int>,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                          int8_t>,
    ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                          int64_t>);
paddle/fluid/operators/flatten_op.h
@@ -112,5 +112,73 @@ class Flatten2GradKernel : public framework::OpKernel<T> {
  }
};
template <typename DeviceContext, typename T>
class FlattenContiguousRangeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto &start_axis = context.Attr<int>("start_axis");
    auto &stop_axis = context.Attr<int>("stop_axis");
    auto *in = context.Input<framework::LoDTensor>("X");
    auto x_dims = in->dims();
    int in_dims_size = x_dims.size();
    int real_start_axis = start_axis, real_stop_axis = stop_axis;
    if (start_axis < 0) {
      real_start_axis = start_axis + in_dims_size;
    }
    if (stop_axis < 0) {
      real_stop_axis = stop_axis + in_dims_size;
    }
    auto *out = context.Output<framework::LoDTensor>("Out");
    auto out_dims = framework::make_ddim(
        GetOutputShape(real_start_axis, real_stop_axis, x_dims));

    out->mutable_data(context.GetPlace(), in->type());
    framework::TensorCopy(
        *in, context.GetPlace(),
        context.template device_context<platform::DeviceContext>(), out);
    out->Resize(out_dims);
  }

  static std::vector<int32_t> GetOutputShape(const int start_axis,
                                             const int stop_axis,
                                             const framework::DDim &in_dims) {
    int64_t outer = 1;
    std::vector<int32_t> out_shape;
    int in_dims_size = in_dims.size();
    out_shape.reserve(in_dims_size - stop_axis + start_axis);
    for (int i = 0; i < start_axis; ++i) {
      out_shape.push_back(in_dims[i]);
    }
    for (int i = start_axis; i <= stop_axis; i++) {
      outer *= in_dims[i];
    }
    out_shape.push_back(outer);
    for (int i = stop_axis + 1; i < in_dims_size; i++) {
      out_shape.push_back(in_dims[i]);
    }
    return out_shape;
  }
};
template <typename DeviceContext, typename T>
class FlattenContiguousRangeGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto *d_x = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
    auto *d_out =
        ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"));

    auto xshape_dims = ctx.Input<framework::LoDTensor>("XShape")->dims();
    auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());

    d_x->mutable_data(ctx.GetPlace(), d_out->type());
    framework::TensorCopySync(*d_out, ctx.GetPlace(), d_x);
    d_x->Resize(x_dims);
  }
};
}  // namespace operators
}  // namespace paddle
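Since the forward kernel only copies the data and rewrites shape metadata, the gradient kernel just reshapes the upstream gradient back to the input's shape, which it recovers from XShape by dropping the leading placeholder dimension. A minimal NumPy sketch of that recovery (an illustration, not code from the patch):

    import numpy as np

    def flatten_grad(d_out, xshape_dims):
        # xshape_dims[0] is the 0 placeholder written by InferShape; the real
        # input shape is xshape_dims[1:], mirroring slice_ddim(xshape, 1, size).
        return d_out.reshape(xshape_dims[1:])

    d_out = np.ones((3, 100, 400), dtype=np.float32)
    assert flatten_grad(d_out, (0, 3, 100, 100, 4)).shape == (3, 100, 100, 4)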
python/paddle/fluid/dygraph/nn.py
@@ -35,7 +35,7 @@ __all__ = [
    'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
    'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu',
    'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm',
    'SpectralNorm', 'TreeConv', 'Flatten'
]
@@ -3182,3 +3182,62 @@ class TreeConv(layers.Layer):
        else:
            pre_activation = out
        return self._helper.append_activation(pre_activation, act=self._act)

class Flatten(layers.Layer):
    """
    :alias_main: paddle.nn.Flatten
    :alias: paddle.nn.Flatten,paddle.nn.layer.Flatten,paddle.nn.layer.common.Flatten

    This interface is used to construct a callable object of the ``Flatten`` class.
    For more details, refer to the code examples.
    It implements flattening a contiguous range of dims into a tensor.

    Parameters:
        start_axis(int): first dim to flatten (default = 1)
        stop_axis(int): last dim to flatten (default = -1).

    Returns:
        None

    Examples:

        .. code-block:: python

            import paddle
            from paddle.imperative import to_variable
            import numpy as np

            inp_np = np.ones([5, 2, 3, 4]).astype('float32')

            paddle.enable_imperative()
            inp_np = to_variable(inp_np)
            flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
            flatten_res = flatten(inp_np)
    """

    def __init__(self, start_axis=1, stop_axis=-1):
        super(Flatten, self).__init__()
        self.start_axis = start_axis
        self.stop_axis = stop_axis

    def forward(self, input):
        out = self._helper.create_variable_for_type_inference(input.dtype)
        x_shape = self._helper.create_variable_for_type_inference(input.dtype)

        if in_dygraph_mode():
            dy_out, _ = core.ops.flatten_contiguous_range(
                input, 'start_axis', self.start_axis, 'stop_axis',
                self.stop_axis)
            return dy_out

        self._helper.append_op(
            type="flatten_contiguous_range",
            inputs={"X": input},
            outputs={"Out": out,
                     "XShape": x_shape},
            attrs={"start_axis": self.start_axis,
                   "stop_axis": self.stop_axis})
        return out
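For the docstring example above, flatten_res has shape [5, 6, 4]: axes 1 and 2 of the [5, 2, 3, 4] input collapse into 2 * 3 = 6.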
python/paddle/fluid/layers/detection.py
python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py (new file, mode 100644)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid as fluid
import paddle
from op_test import OpTest
class TestFlattenOp(OpTest):
    def setUp(self):
        self.op_type = "flatten_contiguous_range"
        self.start_axis = 0
        self.stop_axis = -1
        self.init_test_case()
        self.inputs = {"X": np.random.random(self.in_shape).astype("float64")}
        self.init_attrs()
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
            "XShape": np.random.random(self.in_shape).astype("float32")
        }

    def test_check_output(self):
        self.check_output(no_check_set=["XShape"])

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = -1
        self.new_shape = (120)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }
class TestFlattenOp_1(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 1
        self.stop_axis = 2
        self.new_shape = (3, 10, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_2(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 1
        self.new_shape = (6, 5, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_3(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 2
        self.new_shape = (30, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_4(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = -2
        self.stop_axis = -1
        self.new_shape = (3, 2, 20)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_5(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 2
        self.stop_axis = 2
        self.new_shape = (3, 2, 5, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOpSixDims(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 3, 2, 4, 4)
        self.start_axis = 3
        self.stop_axis = 5
        self.new_shape = (3, 2, 3, 32)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }
class TestFlatten2OpError(unittest.TestCase):
    def test_errors(self):
        image_shape = (2, 3, 4, 4)
        x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                      image_shape[3]).reshape(image_shape) / 100.
        x = x.astype('float32')

        def test_ValueError1():
            x_var = paddle.nn.data(
                name="x", shape=image_shape, dtype='float32')
            out = paddle.flatten(x_var, start_axis=2, stop_axis=1)

        self.assertRaises(ValueError, test_ValueError1)

        def test_ValueError2():
            x_var = paddle.nn.data(
                name="x", shape=image_shape, dtype='float32')
            paddle.flatten(x_var, start_axis=10, stop_axis=1)

        self.assertRaises(ValueError, test_ValueError2)

        def test_ValueError3():
            x_var = paddle.nn.data(
                name="x", shape=image_shape, dtype='float32')
            paddle.flatten(x_var, start_axis=2, stop_axis=10)

        self.assertRaises(ValueError, test_ValueError3)

        def test_type():
            # dtype must be float32, float64, int8, int32, int64.
            x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                           image_shape[3]).reshape(image_shape) / 100.
            x2 = x2.astype('float16')
            x2_var = paddle.data(name='x2', shape=[3, 2, 4, 5], dtype='float16')
            paddle.flatten(x2_var)

        self.assertRaises(TypeError, test_type)

        def test_InputError():
            out = paddle.flatten(x)

        self.assertRaises(ValueError, test_InputError)
class TestFlattenPython(unittest.TestCase):
    def test_python_api(self):
        image_shape = (2, 3, 4, 4)
        x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                      image_shape[3]).reshape(image_shape) / 100.
        x = x.astype('float32')

        def test_InputError():
            out = paddle.flatten(x)

        self.assertRaises(ValueError, test_InputError)

        def test_Negative():
            paddle.enable_imperative()
            img = paddle.imperative.to_variable(x)
            out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
            return out.numpy().shape

        res_shape = test_Negative()
        self.assertTrue((2, 3, 16) == res_shape)


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/test_layers.py
@@ -180,6 +180,51 @@ class TestLayer(LayerTest):
        self.assertRaises(TypeError, test_type)
    def test_Flatten(self):
        inp = np.ones([3, 4, 4, 5], dtype='float32')
        with self.static_graph():
            t = layers.data(
                name='data',
                shape=[3, 4, 4, 5],
                dtype='float32',
                append_batch_size=False)
            flatten = nn.Flatten()
            ret = flatten(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret])[0]

        with self.dynamic_graph():
            t = base.to_variable(inp)
            flatten = nn.Flatten()
            dy_ret = flatten(t)
            dy_ret_value = dy_ret.numpy()

        self.assertTrue(np.array_equal(static_ret, dy_ret_value))

        with self.static_graph():
            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16 or float32 or float64
            # float16 only can be set on GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)
    def test_layer_norm(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
...
python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py
@@ -17,6 +17,7 @@ no_check_set_white_list = [
    'fake_quantize_range_abs_max',
    'coalesce_tensor',
    'flatten2',
    'flatten_contiguous_range',
    'lrn',
    'squeeze2',
    'reshape2',
...
python/paddle/nn/__init__.py
@@ -63,6 +63,7 @@ from .layer.common import Pool2D  #DEFINE_ALIAS
from .layer.common import Pad2D  #DEFINE_ALIAS
from .layer.common import Embedding  #DEFINE_ALIAS
from .layer.common import Linear  #DEFINE_ALIAS
from .layer.common import Flatten  #DEFINE_ALIAS
from .layer.common import UpSample  #DEFINE_ALIAS
from .layer.conv import Conv2D  #DEFINE_ALIAS
from .layer.conv import Conv2DTranspose  #DEFINE_ALIAS
...
python/paddle/nn/layer/__init__.py
@@ -39,6 +39,7 @@ from .common import Pool2D  #DEFINE_ALIAS
from .common import Pad2D  #DEFINE_ALIAS
from .common import Embedding  #DEFINE_ALIAS
from .common import Linear  #DEFINE_ALIAS
from .common import Flatten  #DEFINE_ALIAS
from .common import UpSample  #DEFINE_ALIAS
from .conv import Conv2D  #DEFINE_ALIAS
from .conv import Conv2DTranspose  #DEFINE_ALIAS
...
python/paddle/nn/layer/common.py
@@ -17,6 +17,7 @@ from ...fluid.dygraph import BilinearTensorProduct  #DEFINE_ALIAS
from ...fluid.dygraph import Pool2D  #DEFINE_ALIAS
from ...fluid.dygraph import Embedding  #DEFINE_ALIAS
from ...fluid.dygraph import Linear  #DEFINE_ALIAS
from ...fluid.dygraph import Flatten  #DEFINE_ALIAS
from ...fluid.dygraph import layers
from .. import functional as F
...
python/paddle/tensor/manipulation.py
@@ -25,7 +25,6 @@ import numpy as np
from ..fluid.layers import cast  #DEFINE_ALIAS
from ..fluid.layers import expand  #DEFINE_ALIAS
from ..fluid.layers import expand_as  #DEFINE_ALIAS
-from ..fluid.layers import flatten  #DEFINE_ALIAS
from ..fluid.layers import reshape  #DEFINE_ALIAS
from ..fluid.layers import scatter  #DEFINE_ALIAS
from ..fluid.layers import slice  #DEFINE_ALIAS
...
@@ -169,6 +168,114 @@ def flip(x, axis, name=None):
reverse = flip  #DEFINE_ALIAS


def flatten(x, start_axis=0, stop_axis=-1, name=None):
    """
    **Flatten op**

    Flattens a contiguous range of axes in a tensor according to start_axis and stop_axis.

    For Example:

    .. code-block:: text

        Case 1:

          Given
            X.shape = (3, 100, 100, 4)

          and
            start_axis = 1
            stop_axis = 2

          We get:
            Out.shape = (3, 100 * 100, 4)

        Case 2:

          Given
            X.shape = (3, 100, 100, 4)

          and
            start_axis = 0
            stop_axis = -1

          We get:
            Out.shape = (3 * 100 * 100 * 4)

    Args:
        x (Variable): A tensor of number of dimensions >= axis. A tensor with data type float32,
                      float64, int8, int32, int64.
        start_axis (int): the start axis to flatten
        stop_axis (int): the stop axis to flatten
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                             Generally, no setting is required. Default: None.

    Returns:
        Variable: A tensor with the contents of the input tensor, with input \
                  axes flattened by indicated start axis and end axis. \
                  A Tensor with data type same as input x.

    Raises:
        ValueError: If x is not a Variable.
        ValueError: If start_axis or stop_axis is illegal.

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            paddle.enable_imperative()

            image_shape = (2, 3, 4, 4)
            x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                          image_shape[3]).reshape(image_shape) / 100.
            x = x.astype('float32')

            img = paddle.imperative.to_variable(x)
            out = paddle.flatten(img, start_axis=1, stop_axis=2)
            # out shape is [2, 12, 4]
    """
    if not (isinstance(x, Variable)):
        raise ValueError("The input x should be a Variable")

    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
    helper = LayerHelper('flatten', **locals())

    x_dim = len(x.shape)
    if not (isinstance(start_axis, int)) or (
            start_axis > x_dim - 1) or start_axis < -x_dim:
        raise ValueError(
            "The start_axis should be a int, and in range [-rank(x), rank(x))")
    if not (isinstance(stop_axis, int)) or (
            stop_axis > x_dim - 1) or stop_axis < -x_dim:
        raise ValueError(
            "The stop_axis should be a int, and in range [-rank(x), rank(x))")
    if start_axis < 0:
        start_axis = start_axis + x_dim
    if stop_axis < 0:
        stop_axis = stop_axis + x_dim
    if start_axis > stop_axis:
        raise ValueError("The stop_axis should not be less than start_axis")

    if in_dygraph_mode():
        dy_out, _ = core.ops.flatten_contiguous_range(
            x, 'start_axis', start_axis, 'stop_axis', stop_axis)
        return dy_out

    out = helper.create_variable_for_type_inference(x.dtype)
    x_shape = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='flatten_contiguous_range',
        inputs={"X": x},
        outputs={'Out': out,
                 'XShape': x_shape},
        attrs={"start_axis": start_axis,
               "stop_axis": stop_axis})
    return out
def roll(x, shifts, axis=None, name=None):
    """
    :alias_main: paddle.roll
...