Unverified commit 22462007 authored by Y yaoxuefeng, committed by GitHub

add new flatten op test=develop (#25393)

Parent 20c8432a
......@@ -241,6 +241,156 @@ class Flatten2GradOp : public framework::OperatorWithKernel {
}
};
class FlattenContiguousRangeOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "FlattenContiguousRange");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
"FlattenContiguousRange");
const auto &start_axis = ctx->Attrs().Get<int>("start_axis");
const auto &stop_axis = ctx->Attrs().Get<int>("stop_axis");
const auto &in_dims = ctx->GetInputDim("X");
int in_dims_size = in_dims.size();
int real_start_axis = start_axis, real_stop_axis = stop_axis;
if (start_axis < 0) {
real_start_axis = start_axis + in_dims_size;
}
if (stop_axis < 0) {
real_stop_axis = stop_axis + in_dims_size;
}
PADDLE_ENFORCE_GE(
real_stop_axis, real_start_axis,
platform::errors::InvalidArgument("The stop_axis should be greater"
"than or equal to start_axis."));
const auto &out_dims =
GetOutputShape(real_start_axis, real_stop_axis, in_dims);
ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
if (in_dims[0] == out_dims[0]) {
// Only pass LoD when the first dimension of output and Input(X)
// are the same.
ctx->ShareLoD("X", "Out");
}
OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Flatten2");
std::vector<int64_t> xshape_dims(in_dims.size() + 1);
xshape_dims[0] = 0;
for (int i = 0; i < in_dims.size(); ++i) {
xshape_dims[i + 1] = in_dims[i];
}
ctx->SetOutputDim("XShape", framework::make_ddim(xshape_dims));
ctx->ShareLoD("X", "XShape");
}
static std::vector<int32_t> GetOutputShape(const int start_axis,
const int stop_axis,
const framework::DDim &in_dims) {
int64_t outer = 1;
std::vector<int32_t> out_shape;
int in_dims_size = in_dims.size();
out_shape.reserve(in_dims_size - stop_axis + start_axis);
for (int i = 0; i < start_axis; ++i) {
out_shape.push_back(in_dims[i]);
}
for (int i = start_axis; i <= stop_axis; i++) {
outer *= in_dims[i];
}
out_shape.push_back(outer);
for (int i = stop_axis + 1; i < in_dims_size; i++) {
out_shape.push_back(in_dims[i]);
}
return out_shape;
}
};
class FlattenContiguousRangeOpMaker : public FlattenOpMaker {
public:
void Make() override {
AddInput("X", "(Tensor) A tensor of rank >= axis.");
AddOutput("Out",
"A 2D tensor is reshaped input tensor. The input dimensions"
"up to axis are flattened to the outer dimension of the output"
"and the remaining input dimensions are flattened into the inner"
"dimension of the output.");
AddAttr<int>("start_axis",
"(int)"
"Indicate the input start dimension (exclusive) to flatten")
.SetDefault(1);
AddAttr<int>("stop_axis",
"(int)"
"Indicate the input stop dimension (exclusive) to flatten")
.SetDefault(1);
AddComment(R"DOC(
Flatten Operator
Flattens the input tensor into a new matrix according to start_axis and stop_axis.
Examples:
Case 1:
Given
X.shape = (3, 100, 100, 4)
and
start_axis = 2, stop_axis = -1
We get:
Out.shape = (3, 100, 400)
Case 2:
Given
X.shape = (3, 100, 100, 4)
and
start_axis = 0, stop_axis = -1
We get:
Out.shape = (3 * 100 * 100 * 4)
)DOC");
AddOutput("XShape",
"XShape is just used to store the shape and lod of X, which will "
"be used in FlattenGradOp.")
.AsIntermediate();
}
};
template <typename T>
class FlattenContiguousRangeGradOpMaker
: public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
void Apply(GradOpPtr<T> grad_op) const override {
grad_op->SetType("flatten_contiguous_range_grad");
grad_op->SetInput("XShape", this->Output("XShape"));
grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
grad_op->SetAttrMap(this->Attrs());
}
};
class FlattenContiguousRangeGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *context) const override {
OP_INOUT_CHECK(context->HasInput("XShape"), "Input", "XShape",
"FlattenContiguousRangeGrad");
OP_INOUT_CHECK(context->HasInput(framework::GradVarName("Out")), "Input",
framework::GradVarName("Out"), "FlattenContiguousRangeGrad");
auto xshape_dims = context->GetInputDim("XShape");
auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
context->SetOutputDim(framework::GradVarName("X"), x_dims);
context->ShareLoD("XShape", framework::GradVarName("X"));
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.device_context());
}
};
DECLARE_INPLACE_OP_INFERER(FlattenOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(FlattenGradInplaceInferer,
{framework::GradVarName("Out"),
......@@ -266,6 +416,16 @@ REGISTER_OPERATOR(flatten2, ops::Flatten2Op, ops::Flatten2OpMaker,
REGISTER_OPERATOR(flatten2_grad, ops::Flatten2GradOp,
ops::FlattenGradInplaceInferer);
REGISTER_OPERATOR(
flatten_contiguous_range, ops::FlattenContiguousRangeOp,
ops::FlattenContiguousRangeOpMaker,
ops::FlattenContiguousRangeGradOpMaker<paddle::framework::OpDesc>,
ops::FlattenContiguousRangeGradOpMaker<paddle::imperative::OpBase>,
ops::FlattenOpInplaceInferer);
REGISTER_OPERATOR(flatten_contiguous_range_grad,
ops::FlattenContiguousRangeGradOp,
ops::FlattenGradInplaceInferer);
REGISTER_OP_CPU_KERNEL(
flatten, ops::FlattenKernel<paddle::platform::CPUDeviceContext, float>,
ops::FlattenKernel<paddle::platform::CPUDeviceContext, double>,
......@@ -292,3 +452,26 @@ REGISTER_OP_CPU_KERNEL(
ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int>,
ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int8_t>,
ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int64_t>);
REGISTER_OP_CPU_KERNEL(
flatten_contiguous_range,
ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
float>,
ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
double>,
ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext, int>,
ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
int8_t>,
ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
int64_t>);
REGISTER_OP_CPU_KERNEL(
flatten_contiguous_range_grad,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
float>,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
double>,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
int>,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
int8_t>,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
int64_t>);
......@@ -42,3 +42,26 @@ REGISTER_OP_CUDA_KERNEL(
ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int>,
ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int8_t>,
ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
flatten_contiguous_range,
ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
float>,
ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
double>,
ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext, int>,
ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
int8_t>,
ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
int64_t>);
REGISTER_OP_CUDA_KERNEL(
flatten_contiguous_range_grad,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
float>,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
double>,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
int>,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
int8_t>,
ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
int64_t>);
......@@ -112,5 +112,73 @@ class Flatten2GradKernel : public framework::OpKernel<T> {
}
};
template <typename DeviceContext, typename T>
class FlattenContiguousRangeKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &start_axis = context.Attr<int>("start_axis");
auto &stop_axis = context.Attr<int>("stop_axis");
auto *in = context.Input<framework::LoDTensor>("X");
auto x_dims = in->dims();
int in_dims_size = x_dims.size();
int real_start_axis = start_axis, real_stop_axis = stop_axis;
if (start_axis < 0) {
real_start_axis = start_axis + in_dims_size;
}
if (stop_axis < 0) {
real_stop_axis = stop_axis + in_dims_size;
}
auto *out = context.Output<framework::LoDTensor>("Out");
auto out_dims = framework::make_ddim(
GetOutputShape(real_start_axis, real_stop_axis, x_dims));
out->mutable_data(context.GetPlace(), in->type());
framework::TensorCopy(
*in, context.GetPlace(),
context.template device_context<platform::DeviceContext>(), out);
out->Resize(out_dims);
}
static std::vector<int32_t> GetOutputShape(const int start_axis,
const int stop_axis,
const framework::DDim &in_dims) {
int64_t outer = 1;
std::vector<int32_t> out_shape;
int in_dims_size = in_dims.size();
out_shape.reserve(in_dims_size - stop_axis + start_axis);
for (int i = 0; i < start_axis; ++i) {
out_shape.push_back(in_dims[i]);
}
for (int i = start_axis; i <= stop_axis; i++) {
outer *= in_dims[i];
}
out_shape.push_back(outer);
for (int i = stop_axis + 1; i < in_dims_size; i++) {
out_shape.push_back(in_dims[i]);
}
return out_shape;
}
};
template <typename DeviceContext, typename T>
class FlattenContiguousRangeGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *d_x = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto *d_out =
ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"));
auto xshape_dims = ctx.Input<framework::LoDTensor>("XShape")->dims();
auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
d_x->mutable_data(ctx.GetPlace(), d_out->type());
framework::TensorCopySync(*d_out, ctx.GetPlace(), d_x);
d_x->Resize(x_dims);
}
};
} // namespace operators
} // namespace paddle
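For intuition, the shape arithmetic in GetOutputShape and the XShape round-trip used by the grad op can be sketched in Python. This is a minimal illustration only; `flatten_shape` is a hypothetical helper, not part of this patch.

# Minimal sketch of the C++ shape logic above (illustration only;
# `flatten_shape` is a hypothetical helper, not part of this patch).
def flatten_shape(in_shape, start_axis, stop_axis):
    rank = len(in_shape)
    # Negative axes count from the end, as in InferShape and the kernels.
    if start_axis < 0:
        start_axis += rank
    if stop_axis < 0:
        stop_axis += rank
    assert start_axis <= stop_axis, "stop_axis must be >= start_axis"
    outer = 1
    for d in in_shape[start_axis:stop_axis + 1]:
        outer *= d
    return list(in_shape[:start_axis]) + [outer] + list(in_shape[stop_axis + 1:])

assert flatten_shape((3, 100, 100, 4), 2, -1) == [3, 100, 400]

# XShape convention: the forward op stores [0] + x.shape, and the grad op
# recovers x.shape by slicing off the leading 0, so XShape carries no data.
x_shape = [3, 2, 5, 4]
assert ([0] + x_shape)[1:] == x_shape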
......@@ -35,7 +35,7 @@ __all__ = [
'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu',
'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm',
'SpectralNorm', 'TreeConv'
'SpectralNorm', 'TreeConv', 'Flatten'
]
......@@ -3182,3 +3182,62 @@ class TreeConv(layers.Layer):
else:
pre_activation = out
return self._helper.append_activation(pre_activation, act=self._act)
class Flatten(layers.Layer):
"""
:alias_main: paddle.nn.Flatten
:alias: paddle.nn.Flatten,paddle.nn.layer.Flatten,paddle.nn.layer.common.Flatten
    This interface is used to construct a callable object of the ``Flatten`` class.
    For more details, refer to the code examples.
    It flattens a contiguous range of axes of a tensor into a single axis.
    Parameters:
        start_axis(int): the first axis (inclusive) to flatten. Default: 1.
        stop_axis(int): the last axis (inclusive) to flatten. Default: -1.
Returns:
None
Examples:
.. code-block:: python
import paddle
from paddle.imperative import to_variable
import numpy as np
inp_np = np.ones([5, 2, 3, 4]).astype('float32')
paddle.enable_imperative()
inp_np = to_variable(inp_np)
flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
flatten_res = flatten(inp_np)
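            # flatten_res.shape is [5, 6, 4]: axes 1..2 of the input are merged.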
"""
def __init__(self, start_axis=1, stop_axis=-1):
super(Flatten, self).__init__()
self.start_axis = start_axis
self.stop_axis = stop_axis
    def forward(self, input):
        if in_dygraph_mode():
            dy_out, _ = core.ops.flatten_contiguous_range(
                input, 'start_axis', self.start_axis, 'stop_axis',
                self.stop_axis)
            return dy_out

        out = self._helper.create_variable_for_type_inference(input.dtype)
        x_shape = self._helper.create_variable_for_type_inference(input.dtype)
        self._helper.append_op(
            type="flatten_contiguous_range",
            inputs={"X": input},
            outputs={"Out": out,
                     "XShape": x_shape},
            attrs={"start_axis": self.start_axis,
                   "stop_axis": self.stop_axis})
        return out
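A minimal static-graph usage sketch for the new layer, assuming the fluid 1.x executor API used elsewhere in this repository; names such as `main_prog` and `exe` are local to this example:

import numpy as np
import paddle
import paddle.fluid as fluid

main_prog = fluid.Program()
with fluid.program_guard(main_prog):
    # Merge axes 1..2 of a [5, 2, 3, 4] input into one axis of size 6.
    data = fluid.data(name='img', shape=[5, 2, 3, 4], dtype='float32')
    flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
    out = flatten(data)

exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(main_prog,
               feed={'img': np.ones([5, 2, 3, 4], dtype='float32')},
               fetch_list=[out])
print(res.shape)  # (5, 6, 4)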
......@@ -471,9 +471,9 @@ def rpn_target_assign(bbox_pred,
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
......@@ -628,9 +628,9 @@ def detection_output(loc,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
......@@ -761,9 +761,9 @@ def detection_output(loc,
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
......@@ -821,9 +821,9 @@ def box_coder(prior_box,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
......@@ -1012,9 +1012,9 @@ def yolov3_loss(x,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
......@@ -1139,9 +1139,9 @@ def yolo_box(x,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
......@@ -1318,9 +1318,9 @@ def bipartite_match(dist_matrix,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
......@@ -1412,9 +1412,9 @@ def target_assign(input,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
This operator can be, for given the target bounding boxes or labels,
to assign classification and regression targets to each prediction as well as
......@@ -1530,9 +1530,9 @@ def ssd_loss(location,
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
......@@ -1777,9 +1777,9 @@ def prior_box(input,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
Each position of the input produce N prior boxes, N is determined by
......@@ -1938,9 +1938,9 @@ def density_prior_box(input,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
......@@ -2130,7 +2130,7 @@ def multi_box_head(inputs,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
:api_attr: Static Graph
Base on SSD ((Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression location and classification confidence on multiple input feature
......@@ -2407,9 +2407,9 @@ def anchor_generator(input,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
......@@ -2612,9 +2612,9 @@ def generate_proposal_labels(rpn_rois,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
......@@ -2737,9 +2737,9 @@ def generate_proposal_labels(rpn_rois,
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
......@@ -2896,9 +2896,9 @@ def generate_proposals(scores,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle
from op_test import OpTest
class TestFlattenOp(OpTest):
def setUp(self):
self.op_type = "flatten_contiguous_range"
self.start_axis = 0
self.stop_axis = -1
self.init_test_case()
self.inputs = {"X": np.random.random(self.in_shape).astype("float64")}
self.init_attrs()
self.outputs = {
"Out": self.inputs["X"].reshape(self.new_shape),
"XShape": np.random.random(self.in_shape).astype("float32")
}
def test_check_output(self):
self.check_output(no_check_set=["XShape"])
def test_check_grad(self):
self.check_grad(["X"], "Out")
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
self.start_axis = 0
self.stop_axis = -1
        self.new_shape = (120, )
def init_attrs(self):
self.attrs = {
"start_axis": self.start_axis,
"stop_axis": self.stop_axis
}
class TestFlattenOp_1(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
self.start_axis = 1
self.stop_axis = 2
self.new_shape = (3, 10, 4)
def init_attrs(self):
self.attrs = {
"start_axis": self.start_axis,
"stop_axis": self.stop_axis
}
class TestFlattenOp_2(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
self.start_axis = 0
self.stop_axis = 1
self.new_shape = (6, 5, 4)
def init_attrs(self):
self.attrs = {
"start_axis": self.start_axis,
"stop_axis": self.stop_axis
}
class TestFlattenOp_3(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
self.start_axis = 0
self.stop_axis = 2
self.new_shape = (30, 4)
def init_attrs(self):
self.attrs = {
"start_axis": self.start_axis,
"stop_axis": self.stop_axis
}
class TestFlattenOp_4(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
self.start_axis = -2
self.stop_axis = -1
self.new_shape = (3, 2, 20)
def init_attrs(self):
self.attrs = {
"start_axis": self.start_axis,
"stop_axis": self.stop_axis
}
class TestFlattenOp_5(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
self.start_axis = 2
self.stop_axis = 2
self.new_shape = (3, 2, 5, 4)
def init_attrs(self):
self.attrs = {
"start_axis": self.start_axis,
"stop_axis": self.stop_axis
}
class TestFlattenOpSixDims(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 3, 2, 4, 4)
self.start_axis = 3
self.stop_axis = 5
self.new_shape = (3, 2, 3, 32)
def init_attrs(self):
self.attrs = {
"start_axis": self.start_axis,
"stop_axis": self.stop_axis
}
class TestFlattenOpError(unittest.TestCase):
def test_errors(self):
image_shape = (2, 3, 4, 4)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
image_shape[3]).reshape(image_shape) / 100.
x = x.astype('float32')
def test_ValueError1():
x_var = paddle.nn.data(name="x", shape=image_shape, dtype='float32')
out = paddle.flatten(x_var, start_axis=2, stop_axis=1)
self.assertRaises(ValueError, test_ValueError1)
def test_ValueError2():
x_var = paddle.nn.data(name="x", shape=image_shape, dtype='float32')
paddle.flatten(x_var, start_axis=10, stop_axis=1)
self.assertRaises(ValueError, test_ValueError2)
def test_ValueError3():
x_var = paddle.nn.data(name="x", shape=image_shape, dtype='float32')
paddle.flatten(x_var, start_axis=2, stop_axis=10)
self.assertRaises(ValueError, test_ValueError3)
def test_type():
# dtype must be float32, float64, int8, int32, int64.
x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
image_shape[3]).reshape(image_shape) / 100.
x2 = x2.astype('float16')
x2_var = paddle.data(name='x2', shape=[3, 2, 4, 5], dtype='float16')
paddle.flatten(x2_var)
self.assertRaises(TypeError, test_type)
def test_InputError():
out = paddle.flatten(x)
self.assertRaises(ValueError, test_InputError)
class TestFlattenPython(unittest.TestCase):
def test_python_api(self):
image_shape = (2, 3, 4, 4)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
image_shape[3]).reshape(image_shape) / 100.
x = x.astype('float32')
def test_InputError():
out = paddle.flatten(x)
self.assertRaises(ValueError, test_InputError)
def test_Negative():
paddle.enable_imperative()
img = paddle.imperative.to_variable(x)
out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
return out.numpy().shape
res_shape = test_Negative()
self.assertTrue((2, 3, 16) == res_shape)
if __name__ == "__main__":
unittest.main()
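For intuition, the expected outputs in the tests above are plain reshapes of the input; a quick hedged consistency check, illustration only and not part of the test suite:

import numpy as np
import paddle

paddle.enable_imperative()
x = np.arange(24, dtype='float32').reshape(2, 3, 4)
pd_out = paddle.flatten(
    paddle.imperative.to_variable(x), start_axis=0, stop_axis=1).numpy()
assert np.array_equal(pd_out, x.reshape(6, 4))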
......@@ -180,6 +180,51 @@ class TestLayer(LayerTest):
self.assertRaises(TypeError, test_type)
def test_Flatten(self):
inp = np.ones([3, 4, 4, 5], dtype='float32')
with self.static_graph():
t = layers.data(
name='data',
shape=[3, 4, 4, 5],
dtype='float32',
append_batch_size=False)
flatten = nn.Flatten()
ret = flatten(t)
static_ret = self.get_static_graph_result(
feed={'data': inp}, fetch_list=[ret])[0]
with self.dynamic_graph():
t = base.to_variable(inp)
flatten = nn.Flatten()
dy_ret = flatten(t)
dy_ret_value = dy_ret.numpy()
self.assertTrue(np.array_equal(static_ret, dy_ret_value))
with self.static_graph():
# the input of Linear must be Variable.
def test_Variable():
inp = np.ones([3, 32, 32], dtype='float32')
linear = nn.Linear(
32,
4,
bias_attr=fluid.initializer.ConstantInitializer(value=1))
linear_ret1 = linear(inp)
self.assertRaises(TypeError, test_Variable)
# the input dtype of Linear must be float16 or float32 or float64
# float16 only can be set on GPU place
def test_type():
inp = np.ones([3, 32, 32], dtype='int32')
linear = nn.Linear(
32,
4,
bias_attr=fluid.initializer.ConstantInitializer(value=1))
linear_ret2 = linear(inp)
self.assertRaises(TypeError, test_type)
def test_layer_norm(self):
inp = np.ones([3, 32, 32], dtype='float32')
with self.static_graph():
......
......@@ -17,6 +17,7 @@ no_check_set_white_list = [
'fake_quantize_range_abs_max',
'coalesce_tensor',
'flatten2',
'flatten_contiguous_range',
'lrn',
'squeeze2',
'reshape2',
......
......@@ -63,6 +63,7 @@ from .layer.common import Pool2D #DEFINE_ALIAS
from .layer.common import Pad2D #DEFINE_ALIAS
from .layer.common import Embedding #DEFINE_ALIAS
from .layer.common import Linear #DEFINE_ALIAS
from .layer.common import Flatten #DEFINE_ALIAS
from .layer.common import UpSample #DEFINE_ALIAS
from .layer.conv import Conv2D #DEFINE_ALIAS
from .layer.conv import Conv2DTranspose #DEFINE_ALIAS
......
......@@ -39,6 +39,7 @@ from .common import Pool2D #DEFINE_ALIAS
from .common import Pad2D #DEFINE_ALIAS
from .common import Embedding #DEFINE_ALIAS
from .common import Linear #DEFINE_ALIAS
from .common import Flatten #DEFINE_ALIAS
from .common import UpSample #DEFINE_ALIAS
from .conv import Conv2D #DEFINE_ALIAS
from .conv import Conv2DTranspose #DEFINE_ALIAS
......
......@@ -17,6 +17,7 @@ from ...fluid.dygraph import BilinearTensorProduct #DEFINE_ALIAS
from ...fluid.dygraph import Pool2D #DEFINE_ALIAS
from ...fluid.dygraph import Embedding #DEFINE_ALIAS
from ...fluid.dygraph import Linear #DEFINE_ALIAS
from ...fluid.dygraph import Flatten #DEFINE_ALIAS
from ...fluid.dygraph import layers
from .. import functional as F
......
......@@ -25,7 +25,6 @@ import numpy as np
from ..fluid.layers import cast #DEFINE_ALIAS
from ..fluid.layers import expand #DEFINE_ALIAS
from ..fluid.layers import expand_as #DEFINE_ALIAS
from ..fluid.layers import flatten #DEFINE_ALIAS
from ..fluid.layers import reshape #DEFINE_ALIAS
from ..fluid.layers import scatter #DEFINE_ALIAS
from ..fluid.layers import slice #DEFINE_ALIAS
......@@ -169,6 +168,114 @@ def flip(x, axis, name=None):
reverse = flip #DEFINE_ALIAS
def flatten(x, start_axis=0, stop_axis=-1, name=None):
"""
**Flatten op**
Flattens a contiguous range of axes in a tensor according to start_axis and stop_axis.
    For example:
.. code-block:: text
Case 1:
Given
X.shape = (3, 100, 100, 4)
and
            start_axis = 1
            stop_axis = 2
          We get:
            Out.shape = (3, 100 * 100, 4)
Case 2:
Given
X.shape = (3, 100, 100, 4)
and
start_axis = 0
stop_axis = -1
We get:
Out.shape = (3 * 100 * 100 * 4)
Args:
        x (Variable): A tensor with number of dimensions >= stop_axis, with data type float32,
                      float64, int8, int32, int64.
        start_axis (int): the first axis (inclusive) to flatten.
        stop_axis (int): the last axis (inclusive) to flatten.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
        Variable: A tensor with the contents of the input tensor, whose axes \
                  from start_axis to stop_axis (inclusive) are flattened into \
                  a single axis. Data type is the same as input x.
Raises:
ValueError: If x is not a Variable.
ValueError: If start_axis or stop_axis is illegal.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_imperative()
image_shape=(2, 3, 4, 4)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]).reshape(image_shape) / 100.
x = x.astype('float32')
img = paddle.imperative.to_variable(x)
out = paddle.flatten(img, start_axis=1, stop_axis=2)
# out shape is [2, 12, 4]
"""
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Variable")
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
helper = LayerHelper('flatten', **locals())
x_dim = len(x.shape)
if not (isinstance(start_axis, int)) or (
start_axis > x_dim - 1) or start_axis < -x_dim:
raise ValueError(
"The start_axis should be a int, and in range [-rank(x), rank(x))")
if not (isinstance(stop_axis, int)) or (
stop_axis > x_dim - 1) or stop_axis < -x_dim:
raise ValueError(
"The stop_axis should be a int, and in range [-rank(x), rank(x))")
if start_axis < 0:
start_axis = start_axis + x_dim
if stop_axis < 0:
stop_axis = stop_axis + x_dim
if start_axis > stop_axis:
raise ValueError("The stop_axis should be larger than stat_axis")
if in_dygraph_mode():
dy_out, _ = core.ops.flatten_contiguous_range(
x, 'start_axis', start_axis, 'stop_axis', stop_axis)
return dy_out
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='flatten_contiguous_range',
inputs={"X": x},
outputs={'Out': out,
'XShape': x_shape},
attrs={"start_axis": start_axis,
"stop_axis": stop_axis})
return out
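A short hedged usage sketch of the negative-axis path, mirroring the imperative API assumed in the docstring above (illustration only):

import numpy as np
import paddle

paddle.enable_imperative()
img = paddle.imperative.to_variable(np.ones([2, 3, 4, 4], dtype='float32'))
out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
print(out.numpy().shape)  # (2, 3, 16)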
def roll(x, shifts, axis=None, name=None):
"""
:alias_main: paddle.roll
......