Commit 28c98103 authored by: W wangmeng28

Merge remote-tracking branch 'upstream/develop' into factorization_machine_layer

......@@ -389,13 +389,60 @@ function(go_test TARGET_NAME)
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
endfunction(go_test)
# Modification of standard 'protobuf_generate_cpp()' with protobuf-lite support
# Usage:
# paddle_protobuf_generate_cpp(<proto_srcs> <proto_hdrs> <proto_files>)
function(paddle_protobuf_generate_cpp SRCS HDRS)
if(NOT ARGN)
message(SEND_ERROR "Error: paddle_protobuf_generate_cpp() called without any proto files")
return()
endif()
set(${SRCS})
set(${HDRS})
if (MOBILE_INFERENCE)
set(EXTRA_FLAG "lite:")
else()
set(EXTRA_FLAG "")
endif()
foreach(FIL ${ARGN})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
get_filename_component(FIL_WE ${FIL} NAME_WE)
set(_protobuf_protoc_src "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc")
set(_protobuf_protoc_hdr "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h")
list(APPEND ${SRCS} "${_protobuf_protoc_src}")
list(APPEND ${HDRS} "${_protobuf_protoc_hdr}")
add_custom_command(
OUTPUT "${_protobuf_protoc_src}"
"${_protobuf_protoc_hdr}"
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_CURRENT_BINARY_DIR}"
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
-I${CMAKE_CURRENT_SOURCE_DIR}
--cpp_out "${EXTRA_FLAG}${CMAKE_CURRENT_BINARY_DIR}" ${ABS_FIL}
DEPENDS ${ABS_FIL} protoc
COMMENT "Running C++ protocol buffer compiler on ${FIL}"
VERBATIM )
endforeach()
set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
set(${SRCS} ${${SRCS}} PARENT_SCOPE)
set(${HDRS} ${${HDRS}} PARENT_SCOPE)
endfunction()
function(proto_library TARGET_NAME)
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS)
cmake_parse_arguments(proto_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(proto_srcs)
set(proto_hdrs)
protobuf_generate_cpp(proto_srcs proto_hdrs ${proto_library_SRCS})
paddle_protobuf_generate_cpp(proto_srcs proto_hdrs ${proto_library_SRCS})
cc_library(${TARGET_NAME} SRCS ${proto_srcs} DEPS ${proto_library_DEPS} protobuf)
endfunction()
......
......@@ -43,11 +43,11 @@ cc_library(backward SRCS backward.cc DEPS net_op)
cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context)
cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto backward ${GLOB_OP_LIB})
if(WITH_GPU)
nv_test(executor_test SRCS executor_test.cc DEPS executor)
else()
cc_test(executor_test SRCS executor_test.cc DEPS executor)
endif()
#if(WITH_GPU)
# nv_test(executor_test SRCS executor_test.cc DEPS executor)
#else()
# cc_test(executor_test SRCS executor_test.cc DEPS executor)
#endif()
cc_library(tensor_array SRCS tensor_array.cc DEPS lod_tensor)
cc_test(tensor_array_test SRCS tensor_array_test.cc DEPS tensor_array place)
......@@ -25,16 +25,6 @@ limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
USE_OP(elementwise_add);
USE_OP(gaussian_random);
USE_OP(feed);
USE_OP(fetch);
USE_OP(mul);
USE_OP(sum);
USE_OP(squared_l2_distance);
USE_OP(fill_constant);
USE_OP(sgd);
using namespace paddle::platform;
using namespace paddle::framework;
......
......@@ -112,7 +112,9 @@ set(DEPS_OPS
cond_op
cross_entropy_op
softmax_with_cross_entropy_op
sum_op)
sum_op
pool_op
pool_with_index_op)
op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc
......@@ -121,6 +123,8 @@ op_library(cond_op SRCS cond_op.cc DEPS framework_proto tensor operator net_op)
op_library(cross_entropy_op DEPS cross_entropy)
op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax)
op_library(sum_op DEPS net_op)
op_library(pool_op DEPS pooling)
op_library(pool_with_index_op DEPS pooling)
list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})
foreach(src ${GENERAL_OPS})
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/margin_rank_loss_op.h"
namespace paddle {
namespace operators {
class MarginRankLossOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext *ctx) const override {
// input check
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("X2"), "Input(X2) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
auto label_dims = ctx->GetInputDim("Label");
auto x1_dims = ctx->GetInputDim("X1");
auto x2_dims = ctx->GetInputDim("X2");
PADDLE_ENFORCE(
(label_dims == x1_dims) && (x1_dims == x2_dims) &&
(label_dims.size() == 2) && (label_dims[1] == 1),
"All inputs must be 2-D tensor with shape [batch_size x 1].");
ctx->SetOutputDim("Activated", label_dims);
ctx->SetOutputDim("Out", label_dims);
}
};
template <typename T>
class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker {
public:
MarginRankLossOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X1",
"(2-D tensor with shape [batch_size x 1]) The score for "
"one item X1 to be ranked, from pairwise ranking model.");
AddInput("X2",
"(2-D tensor with shape [batch_size x 1]) The score for "
"another item X2 to be ranked, from pairwise ranking model.");
AddInput("Label",
"(2-D tensor with shape [batch_size x 1]) "
"The label indicating X1 ranked higher than X2 or not, "
"can only be +1 or -1.");
AddAttr<T>("margin", "(scalar, default 0) Margin for MarginRankLossOp.")
.SetDefault(static_cast<T>(0));
AddOutput("Activated",
"(2-D tensor with shape [batch_size x 1]) Intermediate tensor "
"to indicate whether each element of Output(Out) is activated.")
.AsIntermediate();
AddOutput("Out",
"(2-D tensor with shape [batch_size x 1]) "
"The output loss of MarginRankLoss operator.");
AddComment(R"DOC(
MarginRankLoss operator measures the loss given a pair of training sample
{`X1`, `X2`} and the `Label` with attribute `margin`, where `Label = +1`
indicating X1 is ranked higher than `X2`, otherwise `Label = -1`. The loss
turns out
loss(X1, X2, Label) = max(0, -Label * (X1 - X2) + margin).
The attribute `margin` involved here helps make the predictions more robust.
Denote the item ranked higher as the positive sample, otherwise the negative
sample. If the score of the two samples satisfies
positive sample - negative sample < margin,
the pair of samples will contribute to the final loss, which will backpropogate
and train the ranking model to enlarge the difference of the two score.
For batch input with size `batch_size`, `X1`, `X2` and `Label`
all have the same shape [batch_size x 1].
)DOC");
}
};
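As a quick illustrative check of the forward rule in the comment above (not part of this diff; the NumPy arrays below are hypothetical):

import numpy as np

# Hypothetical scores for three sample pairs and their labels (+1 / -1).
x1 = np.array([[0.9], [0.2], [0.5]])
x2 = np.array([[0.4], [0.8], [0.5]])
label = np.array([[1.0], [-1.0], [1.0]])
margin = 0.1

# loss = max(0, -Label * (X1 - X2) + margin), as documented above.
out = np.maximum(0.0, -label * (x1 - x2) + margin)
# "Activated" marks which elements produced a non-zero loss.
activated = (out > 0).astype(np.float64)
print(out.ravel())        # [0.  0.  0.1]
print(activated.ravel())  # [0.  0.  1.]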
class MarginRankLossGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("X2"), "Input(X2) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("Activated"),
"Intermediate(Activated) shouldn't be null.");
auto dims = ctx->GetInputDim("Label");
ctx->SetOutputDim(framework::GradVarName("X1"), dims);
ctx->SetOutputDim(framework::GradVarName("X2"), dims);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(margin_rank_loss, ops::MarginRankLossOp,
ops::MarginRankLossOpMaker<float>, margin_rank_loss_grad,
ops::MarginRankLossGradOp);
REGISTER_OP_CPU_KERNEL(
margin_rank_loss,
ops::MarginRankLossKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
margin_rank_loss_grad,
ops::MarginRankLossGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/margin_rank_loss_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
margin_rank_loss,
ops::MarginRankLossKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
margin_rank_loss_grad,
ops::MarginRankLossGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename T>
struct ReLU {
HOSTDEVICE T operator()(const T& val) const {
return val > 0 ? val : static_cast<T>(0);
}
};
template <typename T>
struct Heaviside {
HOSTDEVICE T operator()(const T& val) const {
return static_cast<T>(val > 0 ? 1 : 0);
}
};
template <typename Place, typename T>
class MarginRankLossKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* out_t = ctx.Output<framework::Tensor>("Out");
auto* act_t = ctx.Output<framework::Tensor>("Activated");
auto* label_t = ctx.Input<framework::Tensor>("Label");
auto* x1_t = ctx.Input<framework::Tensor>("X1");
auto* x2_t = ctx.Input<framework::Tensor>("X2");
out_t->mutable_data<T>(ctx.GetPlace());
act_t->mutable_data<T>(ctx.GetPlace());
auto margin = static_cast<T>(ctx.Attr<T>("margin"));
auto out = framework::EigenVector<T>::Flatten(*out_t);
auto act = framework::EigenVector<T>::Flatten(*act_t);
auto label = framework::EigenVector<T>::Flatten(*label_t);
auto x1 = framework::EigenVector<T>::Flatten(*x1_t);
auto x2 = framework::EigenVector<T>::Flatten(*x2_t);
auto& dev = ctx.GetEigenDevice<Place>();
out.device(dev) = (-label * (x1 - x2) + margin).unaryExpr(ReLU<T>());
act.device(dev) = out.unaryExpr(Heaviside<T>());
}
};
template <typename Place, typename T>
class MarginRankLossGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* d_x1_t =
ctx.Output<framework::LoDTensor>(framework::GradVarName("X1"));
auto* d_x2_t =
ctx.Output<framework::LoDTensor>(framework::GradVarName("X2"));
auto* act_t = ctx.Input<framework::Tensor>("Activated");
auto* d_out_t = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* label_t = ctx.Input<framework::Tensor>("Label");
auto d_out = framework::EigenVector<T>::Flatten(*d_out_t);
auto act = framework::EigenVector<T>::Flatten(*act_t);
auto label = framework::EigenVector<T>::Flatten(*label_t);
auto& dev = ctx.GetEigenDevice<Place>();
// compute d_x1
if (d_x1_t) {
d_x1_t->mutable_data<T>(ctx.GetPlace());
auto d_x1 = framework::EigenVector<T>::Flatten(*d_x1_t);
d_x1.device(dev) = -d_out * act * label;
}
// compute d_x2
if (d_x2_t) {
d_x2_t->mutable_data<T>(ctx.GetPlace());
auto d_x2 = framework::EigenVector<T>::Flatten(*d_x2_t);
d_x2.device(dev) = d_out * act * label;
}
}
};
} // namespace operators
} // namespace paddle
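The backward kernel above follows from differentiating max(0, -Label * (X1 - X2) + margin): where the loss is active, d loss / d X1 = -Label and d loss / d X2 = +Label, and zero elsewhere. A small NumPy sketch with hypothetical values, continuing the forward example:

import numpy as np

d_out = np.array([[1.0], [1.0], [1.0]])       # upstream gradient (hypothetical)
label = np.array([[1.0], [-1.0], [1.0]])
activated = np.array([[0.0], [0.0], [1.0]])   # Heaviside of the forward loss

# Mirrors the kernel: d_x1 = -d_out * act * label, d_x2 = d_out * act * label
d_x1 = -d_out * activated * label
d_x2 = d_out * activated * label
print(d_x1.ravel(), d_x2.ravel())  # only the third (active) pair receives a gradient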
if(WITH_GPU)
nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu pooling.cc pooling.cu DEPS cblas device_context operator)
nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu DEPS cblas device_context operator)
nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor)
nv_library(softmax SRCS softmax.cc softmax.cu DEPS operator)
nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator)
nv_library(pooling SRCS pooling.cc pooling.cu DEPS device_context)
nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS device_context)
else()
cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc DEPS cblas device_context operator)
cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context operator)
cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor)
cc_library(softmax SRCS softmax.cc DEPS operator)
cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator)
cc_library(pooling SRCS pooling.cc DEPS device_context)
cc_library(vol2col SRCS vol2col.cc DEPS device_context)
endif()
......
......@@ -22,14 +22,8 @@ int OutputSizePool(int input_size, int filter_size, int padding, int stride) {
return output_size;
}
class PoolOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"X(Input) of Pooling should not be null.");
void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
PADDLE_ENFORCE(ctx->HasInput("X"), "X(Input) of Pooling should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Out(Output) of Pooling should not be null.");
......@@ -40,10 +34,8 @@ class PoolOp : public framework::OperatorWithKernel {
std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
PADDLE_ENFORCE(pooling_type == "max" || pooling_type == "avg",
"pooling_type should be 'max' or 'avg'");
PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
"Pooling intput should be 4-D or 5-D");
"Pooling intput should be 4-D or 5-D tensor.");
if (ctx->Attrs().Get<bool>("globalPooling")) {
ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
......@@ -52,13 +44,11 @@ class PoolOp : public framework::OperatorWithKernel {
}
PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U,
"Input size and Pooling size should be consistent.");
PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3,
"Pooling size should be 2 elements. or 3 elements.");
"Input size and pooling size should be consistent.");
PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
"strides size and pooling size should be the same.");
"Strides size and pooling size should be the same.");
PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(),
"paddings size and pooling size should be the same.");
"Paddings size and pooling size should be the same.");
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
for (size_t i = 0; i < ksize.size(); ++i) {
......@@ -66,45 +56,41 @@ class PoolOp : public framework::OperatorWithKernel {
OutputSizePool(in_x_dims[i + 2], ksize[i], paddings[i], strides[i]));
}
ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
}
};
class PoolOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
}
protected:
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"X(Input) of Pooling should not be null.");
void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Input@Grad of Pooling should not be null.");
"Input(X@GRAD) should not be null.");
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
};
}
class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker {
public:
Pool2dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput(
"X",
"The input tensor of pooling operator. "
"(Tensor) The input tensor of pooling operator. "
"The format of input tensor is NCHW. Where N is batch size, C is the "
"number of channels, H and W is the height and width of feature.");
AddOutput("Out",
"The output tensor of pooling operator."
"The format of output tensor is also NCHW.");
"(Tensor) The output tensor of pooling operator."
"The format of output tensor is also NCHW."
"Where N is batch size, C is "
"the number of channels, H and W is the height and "
"width of feature.");
AddAttr<std::string>("poolingType",
"PoolingType of pooling operator."
"Str constant equal to 'max' or 'avg'.")
.InEnum({"max", "avg"});
AddAttr<std::vector<int>>(
"ksize",
"Pooling size(depth, height, width) of pooling operator."
"The pooling window size(height, width) of pooling operator."
"If globalPooling = true, ksize is ignored and need not be "
"specified."); // TODO(Add checker)
"specified."); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
AddAttr<bool>(
"globalPooling",
"Whether to use the globalPooling."
......@@ -113,43 +99,64 @@ class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker {
"If globalPooling = true, ksize is ignored and need not be specified.")
.SetDefault(false);
AddAttr<std::vector<int>>("strides",
"Strides(height, width) of pooling operator."
"Default {1,1}")
.SetDefault({1, 1}); // TODO(Add checker)
"The strides(height, width) of pooling window."
"Default {1,1}.")
.SetDefault({1, 1}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
AddAttr<std::vector<int>>("paddings",
"Paddings(height, width) of pooling operator."
"The zero padding(height, width) size on both sides"
"Default {0,0}.")
.SetDefault({0, 0}); // TODO(Add checker)
.SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
AddComment(R"DOC(
The pooling2d operation calculates the output based on
the input, poolingType and ksize, strides, paddings parameters.
Input(X) and output(Out) are in NCHW format. Where N is batch size, C is the
number of channels, H and W is the height and width of feature.
Parameters(ksize, strides, paddings) are two elements.
These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.
Example:
Input:
X shape: (N, C, H_in, W_in)
Output:
Out shape: (N, C, H_out, W_out)
Mask shape: (N, C, H_out, W_out)
where
H_out = (H_in - ksize[0] + 2 * paddings[0]) / strides[0] + 1;
W_out = (W_in - ksize[1] + 2 * paddings[1]) / strides[1] + 1;
)DOC");
}
};
}
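The H_out / W_out formula in the comment above can be sanity-checked with a few lines of Python (illustrative numbers only, not taken from this diff):

def pool_out_size(in_size, ksize, padding, stride):
    # Mirrors OutputSizePool(): (in - k + 2 * pad) / stride + 1, with integer division.
    return (in_size - ksize + 2 * padding) // stride + 1

# Example: an NCHW input of shape (N, C, 7, 7), 2x2 window, stride 2, no padding.
h_out = pool_out_size(7, 2, 0, 2)   # -> 3
w_out = pool_out_size(7, 2, 0, 2)   # -> 3
print(h_out, w_out)                 # the output shape is (N, C, 3, 3)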
class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker {
public:
Pool3dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"The input tensor of pooling operator. "
AddInput(
"X",
"(Tensor) The input tensor of pooling operator. "
"The format of input tensor is NCDHW. Where N is batch size, C is "
"the "
"number of channels, D, H and W is the depth, height and width of "
"the number of channels, D, H and W is the depth, height and width of "
"feature.");
AddOutput("Out",
"The output tensor of pooling operator."
"The format of output tensor is also NCDHW.");
"(Tensor) The output tensor of pooling operator."
"The format of output tensor is also NCDHW."
"Where N is batch size, C is "
"the number of channels, D, H and W is the depth, height and "
"width of feature.");
AddAttr<std::string>("poolingType",
"PoolingType of pooling operator."
"str constant equal to 'max' or 'avg'.")
"Str constant equal to 'max' or 'avg'.")
.InEnum({"max", "avg"});
AddAttr<std::vector<int>>(
"ksize",
"Pooling size(depth, height, width) of pooling operator."
"The pooling window size(depth, height, width) of pooling operator."
"If globalPooling = true, ksize is ignored and need not be "
"specified."); // TODO(Add checker)
"specified."); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
AddAttr<bool>(
"globalPooling",
"Whether to use the globalPooling."
......@@ -157,22 +164,39 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker {
"Default false."
"If globalPooling = true, ksize is ignored and need not be specified.")
.SetDefault(false);
AddAttr<std::vector<int>>(
"strides",
AddAttr<std::vector<int>>("strides",
"Strides(depth, height, width) of pooling operator."
"Default {1,1,1}.")
.SetDefault({1, 1, 1}); // TODO(Add checker)
.SetDefault({1, 1, 1}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
AddAttr<std::vector<int>>(
"paddings",
"Paddings(depth, height, width) of pooling operator."
"Default {0,0,0}.")
.SetDefault({0, 0, 0}); // TODO(Add checker)
.SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
AddComment(R"DOC(
The pooling3d operation calculates the output based on
the input, poolingType and ksize, strides, paddings parameters.
Input(X) and output(Out) are in NCDHW format. Where N is batch
size, C is the number of channels, D, H and W is the depth, height and
width of feature. Parameters(ksize, strides, paddings) are three elements.
These three elements represent depth, height and width, respectively.
The input(X) size and output(Out) size may be different.
Example:
Input:
X shape: (N, C, D_in, H_in, W_in)
Output:
Out shape: (N, C, D_out, H_out, W_out)
Mask shape: (N, C, D_out, H_out, W_out)
where
D_out = (D_in - ksize[0] + 2 * paddings[0]) / strides[0] + 1;
H_out = (H_in - ksize[1] + 2 * paddings[1]) / strides[1] + 1;
W_out = (W_in - ksize[2] + 2 * paddings[2]) / strides[2] + 1;
)DOC");
}
};
}
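The 3-D variant applies the same rule along the depth dimension as well; a minimal self-contained sketch with hypothetical sizes:

def pool_out_size(in_size, ksize, padding, stride):
    return (in_size - ksize + 2 * padding) // stride + 1

# NCDHW input (N, C, 16, 7, 7) with ksize (2, 3, 3), paddings (0, 1, 1), strides (2, 2, 2):
print(pool_out_size(16, 2, 0, 2),   # D_out = 8
      pool_out_size(7, 3, 1, 2),    # H_out = 4
      pool_out_size(7, 3, 1, 2))    # W_out = 4
# -> the output has shape (N, C, 8, 4, 4)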
} // namespace operators
} // namespace paddle
......
......@@ -24,6 +24,34 @@ namespace operators {
using Tensor = framework::Tensor;
class PoolOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override;
};
class PoolOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override;
};
class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker {
public:
Pool2dOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker);
};
class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker {
public:
Pool3dOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker);
};
template <typename Place, typename T>
class PoolKernel : public framework::OpKernel<T> {
public:
......
......@@ -43,7 +43,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
"Pooling intput should be 4-D or 5-D");
"Pooling intput should be 4-D or 5-D tensor.");
if (ctx->Attrs().Get<bool>("globalPooling")) {
ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
......@@ -52,7 +52,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
}
PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U,
"Intput size and pooling size should be consistent.");
"Input size and pooling size should be consistent.");
PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
"Strides size and pooling size should be the same.");
PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(),
......@@ -74,6 +74,7 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Mask"), "Input(Mask) must not be null.");
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Input(X@GRAD) should not be null.");
......@@ -88,17 +89,17 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput(
"X",
"The input tensor of pooling operator. "
"(Tensor) The input tensor of pooling operator. "
"The format of input tensor is NCHW. Where N is batch size, C is the "
"number of channels, H and W is the height and width of image.");
AddOutput("Out",
"The output tensor of pooling operator."
"(Tensor) The output tensor of pooling operator."
"The format of output tensor is also NCHW."
"Where N is batch size, C is "
"the number of channels, H and W is the height and "
"width of image.");
AddOutput("Mask",
"The Mask tensor of pooling operator."
"(Tensor) The Mask tensor of pooling operator."
"The format of output tensor is also NCHW."
"Where N is batch size, C is the number of channels, H and W "
"is the height and width of image."
......@@ -106,7 +107,7 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<std::vector<int>>(
"ksize",
"The pooling size(height, width) of pooling operator."
"The pooling window size(height, width) of pooling operator."
"If globalPooling = true, ksize is ignored and need not be "
"specified."); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
......@@ -118,12 +119,13 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
"If globalPooling = true, ksize is ignored and need not be specified.")
.SetDefault(false);
AddAttr<std::vector<int>>("strides",
"Strides(height, width) of pooling operator."
"The strides(height, width) of pooling window."
"Default {1,1}.")
.SetDefault({1, 1}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
AddAttr<std::vector<int>>("paddings",
"Paddings(height, width) of pooling operator."
AddAttr<std::vector<int>>(
"paddings",
"The zero padding(height, width) size on both sides"
"Default {0,0}.")
.SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
......@@ -135,6 +137,17 @@ output(Out, Mask) are in NCHW format. Where N is batch size, C is the
number of channels, H and W is the height and width of feature.
Parameters(ksize, strides, paddings) are two elements.
These two elements represent height and width, respectively.
The input(X) size and output(Out, Mask) size may be different.
Example:
Input:
X shape: (N, C, H_in, W_in)
Output:
Out shape: (N, C, H_out, W_out)
Mask shape: (N, C, H_out, W_out)
where
H_out = (H_in - ksize[0] + 2 * paddings[0]) / strides[0] + 1;
W_out = (W_in - ksize[1] + 2 * paddings[1]) / strides[1] + 1;
)DOC");
}
};
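To make Out and Mask concrete, here is a tiny NumPy sketch of 2x2 max pooling with stride 2 on a single (H_in=4, W_in=4) feature map; the convention that Mask stores the flat index of the maximum within the input map is an assumption for illustration, not taken from this diff:

import numpy as np

x = np.array([[1., 2., 5., 6.],
              [3., 4., 7., 8.],
              [9., 1., 2., 3.],
              [4., 5., 6., 7.]])

out = np.zeros((2, 2))
mask = np.zeros((2, 2), dtype=np.int64)
for i in range(2):
    for j in range(2):
        window = x[2 * i:2 * i + 2, 2 * j:2 * j + 2]
        out[i, j] = window.max()
        r, c = np.unravel_index(window.argmax(), window.shape)
        mask[i, j] = (2 * i + r) * x.shape[1] + (2 * j + c)

print(out)   # [[4. 8.] [9. 7.]]
print(mask)  # [[ 5  7] [ 8 15]]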
......@@ -146,18 +159,18 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput(
"X",
"The input tensor of pooling operator. "
"(Tensor) The input tensor of pooling operator. "
"The format of input tensor is NCDHW. Where N is batch size, C is "
"the number of channels, D, H and W is the depth, height and width of "
"image.");
AddOutput("Out",
"The output tensor of pooling operator."
"(Tensor) The output tensor of pooling operator."
"The format of output tensor is also NCDHW."
"Where N is batch size, C is "
"the number of channels, D, H and W is the depth, height and "
"width of image.");
AddOutput("Mask",
"The Mask tensor of pooling operator."
"(Tensor) The Mask tensor of pooling operator."
"The format of output tensor is also NCDHW."
"Where N is batch size, C is the number of channels, D, H and W "
"is the depth, height and width of image."
......@@ -165,7 +178,7 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<std::vector<int>>(
"ksize",
"The pooling size(depth, height, width) of pooling operator."
"The pooling window size(depth, height, width) of pooling operator."
"If globalPooling = true, ksize is ignored and need not be "
"specified."); // TODO(Chengduo): Add checker. (Currently,
// TypedAttrChecker don't support vector type.)
......@@ -196,6 +209,18 @@ Input(X) and output(Out, Mask) are in NCDHW format. Where N is batch
size, C is the number of channels, D, H and W is the depth, height and
width of feature. Parameters(ksize, strides, paddings) are three elements.
These three elements represent depth, height and width, respectively.
The input(X) size and output(Out, Mask) size may be different.
Example:
Input:
X shape: (N, C, D_in, H_in, W_in)
Output:
Out shape: (N, C, D_out, H_out, W_out)
Mask shape: (N, C, D_out, H_out, W_out)
where
D_out = (D_in - ksize[0] + 2 * paddings[0]) / strides[0] + 1;
H_out = (H_in - ksize[1] + 2 * paddings[1]) / strides[1] + 1;
W_out = (W_in - ksize[2] + 2 * paddings[2]) / strides[2] + 1;
)DOC");
}
};
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/sequence_concat_op.h"
namespace paddle {
namespace operators {
class SequenceConcatOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInputs("X"),
"Inputs(X) of SequenceConcatOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SequenceConcatOp should not be null.");
const size_t level = static_cast<size_t>(ctx->Attrs().Get<int>("level"));
const size_t axis = static_cast<size_t>(ctx->Attrs().Get<int>("axis"));
PADDLE_ENFORCE(level == 0UL || level == 1UL,
"The sequence_concat operator only accepts sequence "
"or a nested sequence as its input.");
auto ins_dims = ctx->GetInputsDim("X");
framework::DDim out_dims = ins_dims[0];
const size_t n = ins_dims.size();
for (size_t i = 1; i < n; ++i) {
out_dims[axis] += ins_dims[i][axis];
}
ctx->SetOutputDim("Out", out_dims);
}
};
class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SequenceConcatOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(A vector of LoDTensor), the input is a vector of LoDTensor, "
"each of which is a variable-length sequence or nested sequence.")
.AsDuplicable();
AddOutput("Out",
"(A LoDTensor), the variable-length output of "
"sequence_concat Op.");
AddAttr<int>("axis",
"(int, default 0)"
"The axis which the inputs will be joined with. "
"If axis is 0, the inputs will be joined with LoD index.")
.SetDefault(0);
AddAttr<int>("level",
"(int, default 0)"
"The level at which the inputs will be joined. "
"If the level is 0, the inputs will be joined at the nested "
"sequence level. "
"If the level is 1, the inputs will be joined at the "
"sequence level. "
"The level should be less than the level number of inputs.")
.SetDefault(0);
AddComment(R"DOC(
The sequence_concat operator concatenates multiple LoDTensors.
It only supports a sequence (a LoD tensor whose number of levels is 1)
or a nested sequence (a LoD tensor whose number of levels is 2) as its input.
- Case1:
If the axis is other than 0 (here, axis is 1 and level is 1),
each input should have the same LoD information, and the LoD
information of the output is the same as that of the inputs.
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4)
LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4)
- Case2:
If the axis is 0 (here, level is 0), the inputs are concatenated along
the time steps; the LoD information of the output needs to be re-computed.
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4)
LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4)
- Case3:
If the axis is 0 (here, level is 1).
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4)
LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4)
NOTE: The levels of all the inputs should be the same.
)DOC");
}
};
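A small Python sketch of Case 2 above (axis = 0, level = 0), showing how the LoD offset vectors of two inputs combine; the helper name is hypothetical and only mirrors the arithmetic described in the comment:

def concat_lod_axis0_level0(lod_x0, lod_x1):
    # Top level: element-wise sum of the offset vectors (same number of sequences).
    top = [a + b for a, b in zip(lod_x0[0], lod_x1[0])]
    # Second level: append x1's sequence lengths after x0's, keeping running offsets.
    bottom = list(lod_x0[1])
    for j in range(1, len(lod_x1[1])):
        bottom.append(bottom[-1] + lod_x1[1][j] - lod_x1[1][j - 1])
    return [top, bottom]

lod_x0 = [[0, 2, 4], [0, 1, 2, 3, 4]]
lod_x1 = [[0, 3, 5], [0, 1, 2, 3, 5]]
print(concat_lod_axis0_level0(lod_x0, lod_x1))
# -> [[0, 5, 9], [0, 1, 2, 3, 4, 5, 6, 7, 9]], matching Case 2 in the comment.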
class SequenceConcatGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"The gradient of Out should not be null.");
PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
"The gradient of X should not be null.");
ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(sequence_concat, ops::SequenceConcatOp, ops::SequenceConcatOpMaker,
sequence_concat_grad, ops::SequenceConcatGradOp);
REGISTER_OP_CPU_KERNEL(
sequence_concat,
ops::SequenceConcatOpKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
sequence_concat_grad,
ops::SequenceConcatGradOpKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/sequence_concat_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
sequence_concat,
ops::SequenceConcatOpKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
sequence_concat_grad,
ops::SequenceConcatGradOpKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/op_registry.h"
#include "paddle/operators/strided_memcpy.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using LoD = framework::LoD;
template <typename T>
LoD concatLoD(const std::vector<const T*> ins, const size_t axis,
const size_t level) {
auto out_lod = ins[0]->lod();
const size_t n = ins.size();
if (axis == 0UL) {
for (size_t i = 1; i < n; ++i) {
for (size_t j = 0; j < ins[i]->lod()[0].size(); ++j) {
out_lod[0][j] += ins[i]->lod()[0][j];
}
if (ins[0]->NumLevels() == 2) {
for (size_t j = 1; j < ins[i]->lod()[1].size(); ++j) {
if (level == 0UL) {
out_lod[1].push_back(out_lod[1].back() + ins[i]->lod()[1][j] -
ins[i]->lod()[1][j - 1]);
} else if (level == 1UL) {
out_lod[1][j] += ins[1]->lod()[1][j];
}
}
}
}
}
return out_lod;
}
template <typename Place, typename T>
class SequenceConcatOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto ins = ctx.MultiInput<LoDTensor>("X");
auto* out = ctx.Output<LoDTensor>("Out");
const size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
const size_t level = static_cast<size_t>(ctx.Attr<int>("level"));
const size_t n = ins.size();
for (size_t i = 1; i < n; ++i) {
PADDLE_ENFORCE_EQ(ins[0]->NumLevels(), ins[i]->NumLevels(),
"The levels of all the input LoDTensors "
"should be the same.");
PADDLE_ENFORCE_EQ(ins[0]->dims().size(), ins[i]->dims().size(),
"The dimension size of all the input LoDTensors "
"should be the same.");
const size_t dims_size = ins[i]->dims().size();
for (size_t j = 0; j < dims_size; ++j) {
if (j == axis) continue;
PADDLE_ENFORCE_EQ(ins[0]->dims()[j], ins[i]->dims()[j],
"Except for the dimension of the specified "
"axis along which all the inputs are concatenated, "
"dimensions of all the other axises of the input "
"LoDTensors should be the same.");
}
}
PADDLE_ENFORCE_GT(ins[0]->NumLevels(), level,
"The levels of all the input LoDTensors "
"should be greater than the specify level");
out->mutable_data<T>(ctx.GetPlace());
auto out_lod = concatLoD<LoDTensor>(ins, axis, level);
out->set_lod(out_lod);
auto out_lod_level = out_lod[level];
for (size_t i = 0; i < out_lod_level.size() - 1; ++i) {
Tensor out_t = out->Slice<T>(static_cast<int>(out_lod_level[i]),
static_cast<int>(out_lod_level[i + 1]));
auto out_stride = framework::stride(out_t.dims());
size_t offset = 0;
for (size_t j = 0; j < n; ++j) {
auto in_lod_level = ins[j]->lod()[level];
auto in_stride = framework::stride(ins[j]->dims());
Tensor in_t = ins[j]->Slice<T>(static_cast<int>(in_lod_level[i]),
static_cast<int>(in_lod_level[i + 1]));
size_t axis_dim = in_t.dims()[axis];
StridedMemcpy<T>(ctx.device_context(), in_t.data<T>(), in_stride,
in_t.dims(), out_stride, out_t.data<T>() + offset);
offset += axis_dim * in_stride[axis];
}
}
}
};
template <typename Place, typename T>
class SequenceConcatGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto ins = ctx.MultiInput<framework::LoDTensor>("X");
auto* out_grad =
ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"));
auto x_grads =
ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
size_t level = static_cast<size_t>(ctx.Attr<int>("level"));
const size_t n = x_grads.size();
// Set Grad(X) LoD as X
for (size_t i = 0; i < n; i++) {
x_grads[i]->set_lod(ins[i]->lod());
x_grads[i]->mutable_data<T>(ctx.GetPlace());
}
auto out_lod = concatLoD<LoDTensor>(ins, axis, level);
auto out_lod_level = out_lod[level];
for (size_t i = 0; i < out_lod_level.size() - 1; ++i) {
Tensor out_grad_t =
out_grad->Slice<T>(static_cast<int>(out_lod_level[i]),
static_cast<int>(out_lod_level[i + 1]));
auto out_grad_stride = framework::stride(out_grad_t.dims());
size_t offset = 0;
for (size_t j = 0; j < n; ++j) {
auto x_grad_lod_level = x_grads[j]->lod()[level];
auto x_grad_stride = framework::stride(x_grads[j]->dims());
Tensor x_grad_t =
x_grads[j]->Slice<T>(static_cast<int>(x_grad_lod_level[i]),
static_cast<int>(x_grad_lod_level[i + 1]));
size_t axis_dim = x_grad_t.dims()[axis];
StridedMemcpy<T>(ctx.device_context(), out_grad_t.data<T>() + offset,
out_grad_stride, out_grad_t.dims(), x_grad_stride,
x_grad_t.data<T>());
offset += axis_dim * out_grad_stride[axis];
}
}
}
};
} // namespace operators
} // namespace paddle
file(GLOB proto_filenames . *.proto)
if (MOBILE_INFERENCE)
file(GLOB proto_filenames . ModelConfig.proto ParameterConfig.proto
TrainerConfig.proto DataConfig.proto)
else()
file(GLOB proto_filenames . *.proto)
endif()
include_directories(${CMAKE_CURRENT_BINARY_DIR})
proto_library(paddle_proto SRCS ${proto_filenames})
......
......@@ -321,7 +321,7 @@ class LayerOutput(object):
:param activation: Layer Activation.
:type activation: BaseActivation.
:param parents: Layer's parents.
:type parents: list|tuple|collections.Sequence
:type parents: list | tuple | collections.Sequence
"""
def __init__(self,
......@@ -438,7 +438,7 @@ def full_matrix_projection(input, size=0, param_attr=None):
size=100,
param_attr=ParamAttr(name='_proj'))
:param input: input layer
:param input: The input of this layer.
:type input: LayerOutput
:param size: The parameter size. Means the width of parameter.
:type size: int
......@@ -474,7 +474,7 @@ def trans_full_matrix_projection(input, size=0, param_attr=None):
initial_mean=0.0,
initial_std=0.01))
:param input: input layer
:param input: The input of this layer.
:type input: LayerOutput
:param size: The parameter size. Means the width of parameter.
:type size: int
......@@ -519,7 +519,7 @@ def table_projection(input, size=0, param_attr=None):
param_attr=ParamAttr(name='_proj'))
:param input: Input layer, which must contains id fields.
:param input: The input of this layer, which must contains id fields.
:type input: LayerOutput
:param size: The parameter size. Means the width of parameter.
:type size: int
......@@ -564,7 +564,7 @@ def identity_projection(input, offset=None, size=None):
Note that neither of the two projections should have any parameter.
:param input: Input Layer.
:param input: The input of this layer.
:type input: LayerOutput
:param offset: Offset, None if use default.
:type offset: int
......@@ -599,7 +599,7 @@ def slice_projection(input, slices):
Note that slice_projection should not have any parameter.
:param input: Input Layer.
:param input: The input of this layer.
:type input: LayerOutput
:param slices: An array of slice parameters.
Each slice contains the start and end offsets based
......@@ -637,7 +637,7 @@ def scaling_projection(input, param_attr=None):
proj = scaling_projection(input=layer)
:param input: Input Layer.
:param input: The input of this layer.
:type input: LayerOutput
:param param_attr: Parameter config, None if use default.
:type param_attr: ParameterAttribute
......@@ -666,7 +666,7 @@ def dotmul_projection(input, param_attr=None):
proj = dotmul_projection(input=layer)
:param input: Input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param param_attr: Parameter config, None if use default.
:type param_attr: ParameterAttribute
......@@ -737,7 +737,7 @@ def context_projection(input,
after context projection and not set padding_attr, sequence will
be [ 0AB ABC BCD CDE DEF EFG FG0 ].
:param input: Input Sequence.
:param input: The input of this layer, which should be a sequence.
:type input: LayerOutput
:param context_len: context length.
:type context_len: int
......@@ -747,7 +747,7 @@ def context_projection(input,
:param padding_attr: Padding Parameter Attribute. If false, it means padding
always be zero. Otherwise Padding is learnable, and
parameter attribute is set by this parameter.
:type padding_attr: bool|ParameterAttribute
:type padding_attr: bool | ParameterAttribute
:return: Projection
:rtype: Projection
"""
......@@ -785,13 +785,13 @@ class MixedLayerType(LayerOutput):
:type name: basestring
:param size: layer size.
:type size: int
:param act: activation type.
:param act: Activation type.
:type act: BaseActivation
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute or None
"""
......@@ -883,15 +883,15 @@ def mixed_layer(size=0,
:type name: basestring
:param size: layer size.
:type size: int
:param input: inputs layer. It is an optional parameter. If set,
:param input: The input of this layer. It is an optional parameter. If set,
then this function will just return layer's name.
:param act: Activation Type.
:param act: Activation Type. LinearActivation is the default.
:type act: BaseActivation
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: The extra layer config. Default is None.
:type layer_attr: ExtraLayerAttribute
:return: MixedLayerType object can add inputs or layer name.
......@@ -932,9 +932,9 @@ def data_layer(name, size, depth=None, height=None, width=None,
:param size: Size of this data layer.
:type size: int
:param height: Height of this data layer, used for image
:type height: int|None
:type height: int | None
:param width: Width of this data layer, used for image
:type width: int|None
:type width: int | None
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute.
:return: LayerOutput object.
......@@ -969,15 +969,15 @@ def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer for this embedding. NOTE: must be Index Data.
:param input: The input of this layer, which must be Index Data.
:type input: LayerOutput
:param size: The embedding dimension.
:type size: int
:param param_attr: The embedding parameter attribute. See ParameterAttribute
for details.
:type param_attr: ParameterAttribute|None
:type param_attr: ParameterAttribute | None
:param layer_attr: Extra layer Config. Default is None.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -1024,11 +1024,11 @@ def fc_layer(input,
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer. Could be a list/tuple of input layer.
:type input: LayerOutput|list|tuple
:param input: The input of this layer.
:type input: LayerOutput | list | tuple
:param size: The layer dimension.
:type size: int
:param act: Activation Type. Default is tanh.
:param act: Activation Type. TanhActivation is the default.
:type act: BaseActivation
:param param_attr: The Parameter Attribute|list.
:type param_attr: ParameterAttribute
......@@ -1036,9 +1036,9 @@ def fc_layer(input,
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -1075,8 +1075,8 @@ def printer_layer(input, format=None, name=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer. Could be a list/tuple of input layer.
:type input: LayerOutput|list|tuple
:param input: The input of this layer.
:type input: LayerOutput | list | tuple
:return: LayerOutput
"""
if isinstance(input, LayerOutput):
......@@ -1113,7 +1113,7 @@ def priorbox_layer(input,
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param image: The network input image.
:type image: LayerOutput
......@@ -1309,7 +1309,7 @@ def cross_channel_norm_layer(input, name=None, param_attr=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param param_attr: The Parameter Attribute|list.
:type param_attr: ParameterAttribute
......@@ -1374,20 +1374,20 @@ def pooling_layer(input,
:type agg_level: AggregateLevel
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: input layer name.
:param input: The input of this layer.
:type input: LayerOutput
:param pooling_type: Type of pooling, MaxPooling(default), AvgPooling,
SumPooling, SquareRootNPooling.
:type pooling_type: BasePoolingType|None
:type pooling_type: BasePoolingType | None
:param stride: The step size between successive pooling regions.
:type stride: Int
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: The Extra Attributes for layer, such as dropout.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -1472,11 +1472,11 @@ def lstmemory(input,
:type name: basestring
:param size: DEPRECATED. size of the lstm cell
:type size: int
:param input: input layer name.
:param input: The input of this layer.
:type input: LayerOutput
:param reverse: is sequence process reversed or not.
:type reverse: bool
:param act: activation type, TanhActivation by default. :math:`h_t`
:param act: Activation type. TanhActivation is the default. :math:`h_t`
:type act: BaseActivation
:param gate_act: gate activation type, SigmoidActivation by default.
:type gate_act: BaseActivation
......@@ -1486,11 +1486,11 @@ def lstmemory(input,
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param param_attr: Parameter Attribute.
:type param_attr: ParameterAttribute|None|False
:type param_attr: ParameterAttribute | None | False
:param layer_attr: Extra Layer attribute
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -1594,14 +1594,14 @@ def grumemory(input,
gru = grumemory(input)
:param name: The gru layer name.
:type name: None|basestring
:param input: input layer.
:type name: None | basestring
:param input: The input of this layer.
:type input: LayerOutput.
:param size: DEPRECATED. size of the gru cell
:type size: int
:param reverse: Whether sequence process is reversed or not.
:type reverse: bool
:param act: activation type, TanhActivation by default. This activation
:param act: Activation type, TanhActivation is the default. This activation
affects the :math:`{\\tilde{h_t}}`.
:type act: BaseActivation
:param gate_act: gate activation type, SigmoidActivation by default.
......@@ -1612,11 +1612,11 @@ def grumemory(input,
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param param_attr: Parameter Attribute.
:type param_attr: ParameterAttribute|None|False
:type param_attr: ParameterAttribute | None | False
:param layer_attr: Extra Layer attribute
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -1673,7 +1673,7 @@ def last_seq(input,
:param agg_level: Aggregated level
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Input layer name.
:param input: The input of this layer.
:type input: LayerOutput
:param stride: The step size between successive pooling regions.
:type stride: Int
......@@ -1729,7 +1729,7 @@ def first_seq(input,
:param agg_level: aggregation level
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Input layer name.
:param input: The input of this layer.
:type input: LayerOutput
:param stride: The step size between successive pooling regions.
:type stride: Int
......@@ -1802,7 +1802,7 @@ def expand_layer(input,
expand_as=layer2,
expand_level=ExpandLevel.FROM_NO_SEQUENCE)
:param input: Input layer
:param input: The input of this layer.
:type input: LayerOutput
:param expand_as: Expand as this layer's sequence info.
:type expand_as: LayerOutput
......@@ -1812,7 +1812,7 @@ def expand_layer(input,
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param expand_level: whether input layer is timestep(default) or sequence.
:type expand_level: ExpandLevel
:param layer_attr: extra layer attributes.
......@@ -1861,7 +1861,7 @@ def repeat_layer(input,
expand = repeat_layer(input=layer, num_repeats=4)
:param input: Input layer
:param input: The input of this layer.
:type input: LayerOutput
:param num_repeats: Repeat the input so many times
:type num_repeats: int
......@@ -1872,7 +1872,7 @@ def repeat_layer(input,
False for treating input as column vector and repeating
in the row direction.
:type as_row_vector: bool
:param act: Activation type.
:param act: Activation type. IdentityActivation is the default.
:type act: BaseActivation
:type name: basestring
:param layer_attr: extra layer attributes.
......@@ -1920,13 +1920,13 @@ def seq_reshape_layer(input,
reshape = seq_reshape_layer(input=layer, reshape_size=4)
:param input: Input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param reshape_size: the size of reshaped sequence.
:type reshape_size: int
:param name: The name of this layer. It is optional.
:type name: basestring
:param act: Activation type.
:param act: Activation type. IdentityActivation is the default.
:type act: BaseActivation
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -1934,7 +1934,7 @@ def seq_reshape_layer(input,
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -1973,8 +1973,8 @@ def interpolation_layer(input, weight, name=None, layer_attr=None):
interpolation = interpolation_layer(input=[layer1, layer2], weight=layer3)
:param input: Input layer.
:type input: list|tuple
:param input: The input of this layer.
:type input: list | tuple
:param weight: Weight layer.
:type weight: LayerOutput
:param name: The name of this layer. It is optional.
......@@ -2026,11 +2026,11 @@ def bilinear_interp_layer(input,
:param input: An input layer.
:type input: LayerOutput.
:param out_size_x: bilinear interpolation output width.
:type out_size_x: int|None
:type out_size_x: int | None
:param out_size_y: bilinear interpolation output height.
:type out_size_y: int|None
:type out_size_y: int | None
:param name: The layer's name, which can not be specified.
:type name: None|basestring
:type name: None | basestring
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -2078,7 +2078,7 @@ def power_layer(input, weight, name=None, layer_attr=None):
power = power_layer(input=layer1, weight=layer2)
:param input: Input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param weight: Weight layer.
:type weight: LayerOutput
......@@ -2122,7 +2122,7 @@ def scaling_layer(input, weight, name=None, layer_attr=None):
scale = scaling_layer(input=layer1, weight=layer2)
:param input: Input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param weight: Weight layer.
:type weight: LayerOutput
......@@ -2162,7 +2162,7 @@ def trans_layer(input, name=None, layer_attr=None):
trans = trans_layer(input=layer)
:param input: Input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: basestring
......@@ -2200,7 +2200,7 @@ def rotate_layer(input, height, width, name=None, layer_attr=None):
height=100,
width=100)
:param input: Input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param height: The height of the sample matrix
:type height: int
......@@ -2309,22 +2309,21 @@ def hsigmoid(input,
cost = hsigmoid(input=[layer1, layer2],
label=data_layer)
:param input: Input layers. It could be a LayerOutput or list/tuple of
LayerOutput.
:type input: LayerOutput|list|tuple
:param input: The input of this layer.
:type input: LayerOutput | list | tuple
:param label: Label layer.
:type label: LayerOutput
:param num_classes: number of classes.
:type num_classes: int|None
:type num_classes: int | None
:param name: The name of this layer. It is optional.
:type name: basestring
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param param_attr: Parameter Attribute. None means default parameter.
:type param_attr: ParameterAttribute|None
:type param_attr: ParameterAttribute | None
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -2432,40 +2431,40 @@ def img_conv_layer(input,
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Layer Input.
:param input: The input of this layer.
:type input: LayerOutput
:param filter_size: The x dimension of a filter kernel. Or input a tuple for
two image dimension.
:type filter_size: int|tuple|list
:type filter_size: int | tuple | list
:param filter_size_y: The y dimension of a filter kernel. Since PaddlePaddle
currently supports rectangular filters, the filter's
shape will be (filter_size, filter_size_y).
:type filter_size_y: int|None
:type filter_size_y: int | None
:param num_filters: Each filter group's number of filters.
:param act: Activation type. Default is tanh
:param act: Activation type. ReluActivation is the default.
:type act: BaseActivation
:param groups: Group size of filters.
:type groups: int
:param stride: The x dimension of the stride. Or input a tuple for two image
dimension.
:type stride: int|tuple|list
:type stride: int | tuple | list
:param stride_y: The y dimension of the stride.
:type stride_y: int
:param padding: The x dimension of the padding. Or input a tuple for two
image dimension
:type padding: int|tuple|list
:type padding: int | tuple | list
:param padding_y: The y dimension of the padding.
:type padding_y: int
:param dilation: The x dimension of the dilation. Or input a tuple for two
image dimension
:type dilation: int|tuple|list
:type dilation: int | tuple | list
:param dilation_y: The y dimension of the dilation.
:type dilation_y: int
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param num_channels: number of input channels. If None will be set
automatically from previous output.
:type num_channels: int
......@@ -2619,15 +2618,15 @@ def img_pool_layer(input,
:param padding: pooling padding width.
:type padding: int
:param padding_y: pooling padding height. It's equal to padding by default.
:type padding_y: int|None
:type padding_y: int | None
:param name: name of pooling layer
:type name: basestring.
:param input: layer's input
:param input: The input of this layer.
:type input: LayerOutput
:param pool_size: pooling window width
:type pool_size: int
:param pool_size_y: pooling window height. It's equal to pool_size by default.
:type pool_size_y: int|None
:type pool_size_y: int | None
:param num_channels: number of input channel.
:type num_channels: int
:param pool_type: pooling type. MaxPooling or AvgPooling. Default is
......@@ -2636,7 +2635,7 @@ def img_pool_layer(input,
:param stride: stride width of pooling.
:type stride: int
:param stride_y: stride height of pooling. It is equal to stride by default.
:type stride_y: int|None
:type stride_y: int | None
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
:param ceil_mode: Whether to use ceil mode to calculate the output height and width.
......@@ -2746,20 +2745,20 @@ def img_pool3d_layer(input,
pool_type=MaxPooling())
:param padding: pooling padding width.
:type padding: int|tuple|list
:type padding: int | tuple | list
:param name: name of pooling layer
:type name: basestring.
:param input: layer's input
:param input: The input of this layer.
:type input: LayerOutput
:param pool_size: pooling window width
:type pool_size: int|tuple|list
:type pool_size: int | tuple | list
:param num_channels: number of input channel.
:type num_channels: int
:param pool_type: pooling type. MaxPooling or AvgPooling. Default is
MaxPooling.
:type pool_type: BasePoolingType
:param stride: stride width of pooling.
:type stride: int|tuple|list
:type stride: int | tuple | list
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
:param ceil_mode: Whether to use ceil mode to calculate the output height and width.
......@@ -2858,7 +2857,7 @@ def spp_layer(input,
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: layer's input.
:param input: The input of this layer.
:type input: LayerOutput
:param num_channels: number of input channel.
:type num_channels: int
......@@ -2951,8 +2950,8 @@ def img_cmrnorm_layer(input,
norm = img_cmrnorm_layer(input=net, size=5)
:param name: The name of this layer. It is optional.
:type name: None|basestring
:param input: layer's input.
:type name: None | basestring
:param input: The input of this layer.
:type input: LayerOutput
:param size: Normalize in number of :math:`size` feature maps.
:type size: int
......@@ -3027,7 +3026,7 @@ def batch_norm_layer(input,
batch_norm for CPU. Otherwise, select batch norm
type based on the specified type. If you use cudnn_batch_norm,
we suggest you use the latest version, such as v5.1.
:type batch_norm_type: None|string, None or "batch_norm" or "cudnn_batch_norm"
:type batch_norm_type: None | string, None or "batch_norm" or "cudnn_batch_norm"
:param act: Activation Type. Better be relu, because batch
normalization will normalize the input near zero.
:type act: BaseActivation
......@@ -3037,7 +3036,7 @@ def batch_norm_layer(input,
:type num_channels: int
:param bias_attr: :math:`\\beta`, better be zero when initialize. So the
initial_std=0, initial_mean=1 is best practice.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param param_attr: :math:`\\gamma`, better be one when initialize. So the
initial_std=0, initial_mean=1 is best practice.
:type param_attr: ParameterAttribute
......@@ -3049,7 +3048,7 @@ def batch_norm_layer(input,
testing. If False, it will use the mean
and variance of current batch of test data for
testing.
:type use_global_stats: bool|None.
:type use_global_stats: bool | None.
:param moving_average_fraction: Factor used in the moving average
computation, referred to as factor,
:math:`runningMean = newMean*(1-factor)
......@@ -3110,7 +3109,7 @@ def sum_to_one_norm_layer(input, name=None, layer_attr=None):
sum_to_one_norm = sum_to_one_norm_layer(input=layer)
:param input: Input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: basestring
......@@ -3146,7 +3145,7 @@ def row_l2_norm_layer(input, name=None, layer_attr=None):
row_l2_norm_layer = row_l2_norm_layer(input=layer)
:param input: Input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: basestring
......@@ -3204,14 +3203,14 @@ def addto_layer(input, act=None, name=None, bias_attr=None, layer_attr=None):
:type name: basestring
:param input: Input layers. It could be a LayerOutput or list/tuple of
LayerOutput.
:type input: LayerOutput|list|tuple
:param act: Activation Type, default is tanh.
:type input: LayerOutput | list | tuple
:param act: Activation Type. LinearActivation is the default.
:type act: BaseActivation
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -3263,8 +3262,8 @@ def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: input layers or projections
:type input: list|tuple|collections.Sequence
:param act: Activation type.
:type input: list | tuple | collections.Sequence
:param act: Activation type. IdentityActivation is the default.
:type act: BaseActivation
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
......@@ -3359,7 +3358,7 @@ def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
:type a: LayerOutput
:param b: input sequence layer
:type b: LayerOutput
:param act: Activation type.
:param act: Activation type. IdentityActivation is the default.
:type act: BaseActivation
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
......@@ -3367,7 +3366,7 @@ def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -3443,9 +3442,9 @@ def memory(name,
:param is_seq: DEPRECATED. Whether boot_layer is a sequence.
:type is_seq: bool
:param boot_layer: boot layer of memory.
:type boot_layer: LayerOutput|None
:type boot_layer: LayerOutput | None
:param boot_bias: boot layer's bias
:type boot_bias: ParameterAttribute|None
:type boot_bias: ParameterAttribute | None
:param boot_bias_active_type: boot layer's active type.
:type boot_bias_active_type: BaseActivation
:param boot_with_const_id: boot layer's id.
......@@ -3540,19 +3539,17 @@ def lstm_step_layer(input,
:type input: LayerOutput
:param state: State Layer. :math:`c_{t-1}`
:type state: LayerOutput
:param act: Activation type. Default is tanh
:param act: Activation type. TanhActivation is the default.
:type act: BaseActivation
:param gate_act: Gate Activation Type. Default is sigmoid, and should
be sigmoid only.
:param gate_act: Gate Activation Type. SigmoidActivation is the default.
:type gate_act: BaseActivation
:param state_act: State Activation Type. Default is sigmoid, and should
be sigmoid only.
:param state_act: State Activation Type. TanhActivation is the default.
:type state_act: BaseActivation
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: layer's extra attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -3603,13 +3600,15 @@ def gru_step_layer(input,
:param output_mem:
:param size:
:param act:
:type act: BaseActivation
:param name: The name of this layer. It is optional.
:param gate_act:
:param gate_act: Activation type of this layer's two gates. Default is Sigmoid.
:type gate_act: BaseActivation
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param param_attr: the parameter_attribute for transforming the output_mem
from previous step.
:param layer_attr:
......@@ -3665,12 +3664,14 @@ def gru_step_naive_layer(input,
:param size:
:param name: The name of this layer. It is optional.
:param act:
:param gate_act:
:type act: BaseActivation
:param gate_act: Activation type of this layer's two gates. Default is Sigmoid.
:type gate_act: BaseActivation
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param param_attr:
:param layer_attr:
:return:
......@@ -3789,15 +3790,15 @@ def recurrent_layer(input,
out_{i} = act(in_{i} + out_{i+1} * W) \\ \\ \\text{for} \\ start <= i < end
:param input: Input Layer
:param input: The input of this layer.
:type input: LayerOutput
:param act: activation.
:param act: Activation type. TanhActivation is the default.
:type act: BaseActivation
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param param_attr: parameter attribute.
:type param_attr: ParameterAttribute
:param name: The name of this layer. It is optional.
......@@ -3904,7 +3905,7 @@ def recurrent_group(step, input, reverse=False, name=None, targetInlink=None):
StaticInput will be imported to each time step, and doesn't change
through time. It's a mechanism to access layers outside the step function.
:type input: LayerOutput|StaticInput|SubsequenceInput|list|tuple
:type input: LayerOutput | StaticInput | SubsequenceInput | list | tuple
:param reverse: If reverse is set true, the recurrent unit will process the
input sequence in a reverse order.
......@@ -3919,7 +3920,7 @@ def recurrent_group(step, input, reverse=False, name=None, targetInlink=None):
of words in each sentence) with all layer group's outputs.
targetInlink should be one of the layer group's input.
:type targetInlink: LayerOutput|SubsequenceInput
:type targetInlink: LayerOutput | SubsequenceInput
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -4037,7 +4038,7 @@ def maxid_layer(input, name=None, layer_attr=None):
maxid = maxid_layer(input=layer)
:param input: Input layer name.
:param input: The input of this layer.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: basestring
......@@ -4115,7 +4116,7 @@ def eos_layer(input, eos_id, name=None, layer_attr=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Input layer name.
:param input: The input of this layer.
:type input: LayerOutput
:param eos_id: end id of sequence
:type eos_id: int
......@@ -4507,7 +4508,7 @@ def conv_projection(input,
num_filters=64,
num_channels=64)
:param input: input layer
:param input: The input of this layer.
:type input: LayerOutput
:param filter_size: The x dimension of a filter kernel.
:type filter_size: int
......@@ -4532,7 +4533,7 @@ def conv_projection(input,
:param param_attr: Convolution param attribute. None means default attribute
:type param_attr: ParameterAttribute
:param trans: whether it is convTrans or conv
:type trans: boolean
:type trans: bool
:return: A DotMulProjection Object.
:rtype: DotMulProjection
"""
......@@ -4640,14 +4641,14 @@ def pad_layer(input,
pad_h=[0,0],
pad_w=[2,2])
:param input: layer's input.
:param input: The input of this layer.
:type input: LayerOutput
:param pad_c: padding size in channel dimension.
:type pad_c: list|None
:type pad_c: list | None
:param pad_h: padding size in height dimension.
:type pad_h: list|None
:type pad_h: list | None
:param pad_w: padding size in width dimension.
:type pad_w: list|None
:type pad_w: list | None
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:param name: The name of this layer. It is optional.
......@@ -4782,7 +4783,7 @@ def tensor_layer(a,
:type b: LayerOutput
:param size: the layer dimension.
:type size: int.
:param act: Activation Type. Default is tanh.
:param act: Activation type. LinearActivation is the default.
:type act: BaseActivation
:param param_attr: The Parameter Attribute.
:type param_attr: ParameterAttribute
......@@ -4790,9 +4791,9 @@ def tensor_layer(a,
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -4839,15 +4840,15 @@ def selective_fc_layer(input,
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput|list|tuple
:param input: The input of this layer.
:type input: LayerOutput | list | tuple
:param select: The select layer. The output of select layer should be a
sparse binary matrix, and is treated as the mask of selective fc.
If it is None, this layer acts exactly like fc_layer.
:type select: LayerOutput
:param size: The layer dimension.
:type size: int
:param act: Activation Type. Default is tanh.
:param act: Activation type. TanhActivation is the default.
:type act: BaseActivation
:param param_attr: The Parameter Attribute.
:type param_attr: ParameterAttribute
......@@ -4855,9 +4856,9 @@ def selective_fc_layer(input,
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -4909,12 +4910,12 @@ def sampling_id_layer(input, name=None, layer_attr=None):
samping_id = sampling_id_layer(input=input)
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -4947,7 +4948,7 @@ def slope_intercept_layer(input,
scale = slope_intercept_layer(input=input, slope=-1.0, intercept=1.0)
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: basestring
......@@ -4956,7 +4957,7 @@ def slope_intercept_layer(input,
:param intercept: the offset.
:type intercept: float.
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -5016,7 +5017,7 @@ def linear_comb_layer(weights, vectors, size=None, name=None, layer_attr=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -5080,10 +5081,10 @@ def block_expand_layer(input,
block_x=1,
block_y=3)
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param num_channels: The channel number of input layer.
:type num_channels: int|None
:type num_channels: int | None
:param block_x: The width of sub block.
:type block_x: int
:param block_y: The height of sub block.
......@@ -5097,9 +5098,9 @@ def block_expand_layer(input,
:param padding_y: The padding size in vertical direction.
:type padding_y: int
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:type name: None | basestring.
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -5158,15 +5159,15 @@ def maxout_layer(input, groups, num_channels=None, name=None, layer_attr=None):
num_channels=128,
groups=4)
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param num_channels: The channel number of input layer. If None will be set
automatically from previous output.
:type num_channels: int|None
:type num_channels: int | None
:param groups: The group number of input layer.
:type groups: int
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:type name: None | basestring.
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -5223,18 +5224,18 @@ def ctc_layer(input,
size=9055,
norm_by_times=True)
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param label: The data layer of label with variable length.
:type label: LayerOutput
:param size: category numbers + 1.
:type size: int
:param name: The name of this layer. It is optional.
:type name: basestring|None
:type name: basestring | None
:param norm_by_times: Whether to do normalization by times. False by default.
:type norm_by_times: bool
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -5300,20 +5301,20 @@ def warp_ctc_layer(input,
blank=1000,
norm_by_times=False)
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param label: The data layer of label with variable length.
:type label: LayerOutput
:param size: category numbers + 1.
:type size: int
:param name: The name of this layer. It is optional.
:type name: basestring|None
:type name: basestring | None
:param blank: the 'blank' label used in ctc
:type blank: int
:param norm_by_times: Whether to do normalization by times. False by default.
:type norm_by_times: bool
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -5371,11 +5372,11 @@ def crf_layer(input,
:param param_attr: Parameter attribute. None means default attribute
:type param_attr: ParameterAttribute
:param name: The name of this layer. It is optional.
:type name: None|basestring
:type name: None | basestring
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -5441,9 +5442,9 @@ def crf_decoding_layer(input,
:param param_attr: Parameter attribute. None means default attribute
:type param_attr: ParameterAttribute
:param name: The name of this layer. It is optional.
:type name: None|basestring
:type name: None | basestring
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -5502,14 +5503,14 @@ def nce_layer(input,
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layers. It could be a LayerOutput of list/tuple of LayerOutput.
:type input: LayerOutput|list|tuple|collections.Sequence
:type input: LayerOutput | list | tuple | collections.Sequence
:param label: label layer
:type label: LayerOutput
:param weight: weight layer, can be None(default)
:type weight: LayerOutput
:param num_classes: number of classes.
:type num_classes: int
:param act: Activation, default is Sigmoid.
:param act: Activation type. SigmoidActivation is the default.
:type act: BaseActivation
:param param_attr: The Parameter Attribute|list.
:type param_attr: ParameterAttribute
......@@ -5518,12 +5519,12 @@ def nce_layer(input,
:param neg_distribution: The distribution for generating the random negative labels.
A uniform distribution will be used if not provided.
If not None, its length must be equal to num_classes.
:type neg_distribution: list|tuple|collections.Sequence|None
:type neg_distribution: list | tuple | collections.Sequence | None
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: layer name.
......@@ -5639,7 +5640,7 @@ def rank_cost(left,
It is an optional argument.
:type weight: LayerOutput
:param name: The name of this layer. It is optional.
:type name: None|basestring
:type name: None | basestring
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float
:param layer_attr: Extra Layer Attribute.
......@@ -5704,7 +5705,7 @@ def lambda_cost(input,
entire list of get gradient.
:type max_sort_size: int
:param name: The name of this layer. It is optional.
:type name: None|basestring
:type name: None | basestring
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -5748,7 +5749,7 @@ def cross_entropy(input,
:param label: The input label.
:type input: LayerOutput.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:type name: None | basestring.
:param coeff: The cost is multiplied with coeff.
The coefficient affects the gradient in the backward.
:type coeff: float.
......@@ -5796,7 +5797,7 @@ def cross_entropy_with_selfnorm(input,
:param label: The input label.
:type input: LayerOutput.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:type name: None | basestring.
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float.
:param softmax_selfnorm_alpha: The scale factor affects the cost.
......@@ -5833,10 +5834,10 @@ def sum_cost(input, name=None, layer_attr=None):
cost = sum_cost(input=input_layer)
:param input: The first input layer.
:param input: The input of this layer.
:type input: LayerOutput.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:type name: None | basestring.
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -5881,7 +5882,7 @@ def huber_regression_cost(input,
:param label: The input label.
:type input: LayerOutput.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:type name: None | basestring.
:param delta: The difference between the observed and predicted values.
:type delta: float.
:param coeff: The coefficient affects the gradient in the backward.
......@@ -5931,7 +5932,7 @@ def huber_classification_cost(input,
:param label: The input label.
:type input: LayerOutput.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:type name: None | basestring.
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float.
:param layer_attr: Extra Layer Attribute.
......@@ -5974,7 +5975,7 @@ def multi_binary_label_cross_entropy(input,
:param label: The input label.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: None|basestring
:type name: None | basestring
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float
:param layer_attr: Extra Layer Attribute.
......@@ -6142,7 +6143,7 @@ def smooth_l1_cost(input, label, name=None, coeff=1.0, layer_attr=None):
:param label: The input label.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: None|basestring
:type name: None | basestring
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float
:param layer_attr: Extra Layer Attribute.
......@@ -6229,7 +6230,7 @@ def dropout_layer(input, dropout_rate, name=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param dropout_rate: The probability of dropout.
:type dropout_rate: float
......@@ -6288,18 +6289,18 @@ def row_conv_layer(input,
row_conv = row_conv_layer(input=input_layer, context_len=3)
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param context_len: The context length equals the lookahead step number
plus one.
:type context_len: int
:param act: Activation Type. Default is linear activation.
:param act: Activation Type. LinearActivation is the default.
:type act: BaseActivation
:param param_attr: The Parameter Attribute. If None, the parameter will be
initialized smartly. It's better to set it by yourself.
:type param_attr: ParameterAttribute
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -6345,7 +6346,7 @@ def prelu_layer(input,
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param partial_sum: this parameter makes a group of inputs share the same weight.
......@@ -6355,9 +6356,9 @@ def prelu_layer(input,
:type partial_sum: int
:param param_attr: The parameter attribute. See ParameterAttribute for details.
:type param_attr: ParameterAttribute|None
:type param_attr: ParameterAttribute | None
:param layer_attr: Extra layer configurations. Default is None.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -6410,37 +6411,37 @@ def gated_unit_layer(input,
.. code-block:: python
gated_unit = gated_unit_layer(size=128, input=input_layer)
:param input: input for this layer.
:param input: The input of this layer.
:type input: LayerOutput
:param size: output size of the gated unit.
:type size: int
:param act: activation type of the projected input.
:param act: Activation type of the projected input. LinearActivation is the default.
:type act: BaseActivation
:param name: The name of this layer. It is optional.
:type name: basestring
:param gate_attr: Attributes to tune the gate output, for example, error
clipping threshold, dropout and so on. See ExtraLayerAttribute for
more details.
:type gate_attr: ExtraLayerAttribute|None
:type gate_attr: ExtraLayerAttribute | None
:param gate_param_attr: Attributes to tune the learnable projected matrix
parameter of the gate.
:type gate_param_attr: ParameterAttribute|None
:type gate_param_attr: ParameterAttribute | None
:param gate_bias_attr: Attributes to tune the learnable bias of the gate.
:type gate_bias_attr: ParameterAttribute|None
:type gate_bias_attr: ParameterAttribute | None
:param inproj_attr: Attributes to the tune the projected input, for
example, error clipping threshold, dropout and so on. See
ExtraLayerAttribute for more details.
:type inproj_attr: ExtraLayerAttribute|None
:type inproj_attr: ExtraLayerAttribute | None
:param inproj_param_attr: Attributes to tune the learnable parameter of
the projection of input.
:type inproj_param_attr: ParameterAttribute|None
:type inproj_param_attr: ParameterAttribute | None
:param inproj_bias_attr: Attributes to tune the learnable bias of
projection of the input.
:type inproj_bias_attr: ParameterAttribute|None
:type inproj_bias_attr: ParameterAttribute | None
:param layer_attr: Attributes to tune the final output of the gated unit,
for example, error clipping threshold, dropout and so on. See
ExtraLayerAttribute for more details.
:type layer_attr: ExtraLayerAttribute|None
:type layer_attr: ExtraLayerAttribute | None
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -6490,7 +6491,7 @@ def switch_order_layer(input,
switch = switch_order(input=layer, name='switch', reshape_axis=reshape_axis)
reshape = {'height':[ 0, 1, 2], 'width':[3]}
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput
:param name: The name of this layer. It is optional.
:type name: basestring
......@@ -6524,7 +6525,7 @@ def switch_order_layer(input,
@layer_support()
def crop_layer(input, offset, axis=2, shape=None, name=None, layer_attr=None):
"""
The crop layer crops images by offset and shape. User can set crop shape by
This layer crops images by offset and shape. User can set crop shape by
args 'shape' explicitly or by reference input layer.
The example usage is:
......@@ -6532,10 +6533,10 @@ def crop_layer(input, offset, axis=2, shape=None, name=None, layer_attr=None):
.. code-block:: python
crop = crop_layer(input=[image_input, reference_input], axis=2, offset=[2, 3])
:param input: The input layer.If two inputs were setted,
the second input will be regarded as reference input
:type input: LayerOutput or Sequence
:param offset: The crop offset
:param input: The input of this layer. If two inputs are given, the second input
will be regarded as reference input.
:type input: LayerOutput | Sequence
:param offset: The crop offset.
:type offset: Sequence
:param axis: start axis to be cropped. For an image input layer:
- 0: batch size
......@@ -6584,12 +6585,12 @@ def sub_nested_seq_layer(input, selected_indices, name=None):
.. code-block:: python
sub_nest_seq = sub_nested_seq_layer(input=[data, selected_indices])
sub_nest_seq = sub_nested_seq_layer(input=data, selected_indices=selected_ids)
:param input: A nested sequence.
:param input: The input of this layer. It is a nested sequence.
:type input: LayerOutput
:param selected_indices: a set of sequence indices in the nested sequence.
:param selected_indices: A set of sequence indices in the nested sequence.
:type selected_indices: LayerOutput
:param name: The name of this layer. It is optional.
:type name: basestring
......@@ -6631,7 +6632,7 @@ def clip_layer(input, min, max, name=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:param input: The input of this layer.
:type input: LayerOutput.
:param min: The lower threshold for clipping.
:type min: double
......@@ -6676,12 +6677,12 @@ def seq_slice_layer(input, starts, ends, name=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: input for this layer, it should be a sequence.
:param input: The input of this layer, which should be a sequence.
:type input: LayerOutput
:param starts: start indices to slice the input sequence.
:type starts: LayerOutput|None
:type starts: LayerOutput | None
:param ends: end indices to slice the input sequence.
:type ends: LayerOutput|None
:type ends: LayerOutput | None
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -6730,9 +6731,9 @@ def kmax_seq_score_layer(input, name=None, beam_size=1):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer. It stores scores over a sequence or a nested
:param input: The input of this layer. It stores scores over a sequence or a nested
sequence and its size must be 1.
:type input: LayerOutput.
:type input: LayerOutput
:param beam_size: sequence indices with top beam_size scores are returned.
:type beam_size: double
:return: LayerOutput object.
......@@ -6788,24 +6789,24 @@ def img_conv3d_layer(input,
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Layer Input.
:param input: The input of this layer.
:type input: LayerOutput
:param filter_size: The x dimension of a filter kernel. Or input a list.
:type filter_size: int|tuple|list
:type filter_size: int | tuple | list
:param num_filters: Each filter group's number of filters.
:param act: Activation type. Default is tanh
:param act: Activation type. ReluActivation is the default.
:type act: BaseActivation
:param groups: Group size of filters.
:type groups: int
:param stride: The x dimension of the stride. Or input a tuple for two image
dimension.
:type stride: int|tuple|list
:type stride: int | tuple | list
:param padding: The x dimension of the padding. Or input a tuple for two
image dimension
:type padding: int|tuple|list
:type padding: int | tuple | list
:param bias_attr: Convolution bias attribute. None means default bias.
False means no bias.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:param num_channels: number of input channels. If None will be set
automatically from previous output.
:type num_channels: int
......@@ -6919,15 +6920,15 @@ def scale_shift_layer(input, name=None, param_attr=None, bias_attr=None):
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput.
:param input: The input of this layer.
:type input: LayerOutput
:param param_attr: The parameter attribute of scaling.
:type param_attr: ParameterAttribute
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:type bias_attr: ParameterAttribute | None | bool | Any
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -6947,11 +6948,11 @@ def resize_layer(input, size, name=None):
into the output matrix with a shape of [Height x Width / size, size],
where size is the parameter of this layer indicating the output dimension.
:param input: The input to this layer.
:param input: The input of this layer.
:type input: LayerOutput.
:param name: The name of this layer. It is optional.
:type name: basestring
:param size: The resized output dimesion of this layer.
:param size: The resized output dimension of this layer.
:type size: int
:return: A LayerOutput object.
:rtype: LayerOutput
......
import unittest
import numpy as np
from op_test import OpTest


class TestMarginRankLossOp(OpTest):
    def setUp(self):
        self.op_type = "margin_rank_loss"
        batch_size = 5
        margin = 0.5
        # labels_{i} = {-1, 1}
        label = 2 * np.random.randint(
            0, 2, size=(batch_size, 1)).astype("float32") - 1
        x1 = np.random.random((batch_size, 1)).astype("float32")
        x2 = np.random.random((batch_size, 1)).astype("float32")
        # loss = max(0, -label * (x1 - x2) + margin)
        loss = -label * (x1 - x2) + margin
        loss = np.where(loss > 0, loss, 0)
        act = np.where(loss > 0, 1., 0.)
        self.attrs = {'margin': margin}
        self.inputs = {'Label': label, 'X1': x1, 'X2': x2}
        self.outputs = {'Activated': act, 'Out': loss}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X1", "X2"], "Out")

    def test_check_grad_ignore_x1(self):
        self.check_grad(["X2"], "Out", no_grad_set=set('X1'))

    def test_check_grad_ignore_x2(self):
        self.check_grad(["X1"], "Out", no_grad_set=set('X2'))


if __name__ == '__main__':
    unittest.main()
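As a quick sanity check of the formula the test encodes, one hand-worked case (the numbers below are arbitrary):

.. code-block:: python

    import numpy as np

    # loss = max(0, -label * (x1 - x2) + margin)
    label, x1, x2, margin = 1.0, 0.3, 0.7, 0.5
    loss = max(0.0, -label * (x1 - x2) + margin)
    assert np.isclose(loss, 0.9)  # the wrong ranking is penalized by the margin plus the gap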
import unittest
import numpy as np
from op_test import OpTest


class TestConcatOp(OpTest):
    def set_data(self):
        # two level, batch size is 3
        x0 = np.random.random((4, 6, 3)).astype('float32')
        lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]]
        x1 = np.random.random((4, 8, 3)).astype('float32')
        lod1 = [[0, 2, 4], [0, 1, 2, 3, 4]]
        axis = 1
        level = 1
        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
        self.attrs = {'axis': axis, 'level': level}
        outs = []
        for i in range(4):
            sub_x0 = x0[lod0[level][i]:lod0[level][i + 1], :]
            sub_x1 = x1[lod1[level][i]:lod1[level][i + 1], :]
            outs.append(np.concatenate((sub_x0, sub_x1), axis=axis))
        self.outputs = {'Out': np.concatenate(outs, axis=0)}

    def setUp(self):
        self.op_type = "sequence_concat"
        self.set_data()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['x0'], 'Out')


class TestConcatOpDiffLod(TestConcatOp):
    def set_data(self):
        # two level, batch size is 3
        x0 = np.random.random((4, 6, 3)).astype('float32')
        lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]]
        x1 = np.random.random((5, 6, 3)).astype('float32')
        lod1 = [[0, 3, 5], [0, 1, 2, 3, 5]]
        axis = 0
        level = 1
        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
        self.attrs = {'axis': axis, 'level': level}
        outs = []
        for i in range(4):
            sub_x0 = x0[lod0[level][i]:lod0[level][i + 1], :]
            sub_x1 = x1[lod1[level][i]:lod1[level][i + 1], :]
            outs.append(np.concatenate((sub_x0, sub_x1), axis=axis))
        self.outputs = {'Out': np.concatenate(outs, axis=0)}


class TestConcatOpLevelZero(TestConcatOp):
    def set_data(self):
        # two level, batch size is 3
        x0 = np.random.random((4, 3, 4)).astype('float32')
        lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]]
        x1 = np.random.random((5, 3, 4)).astype('float32')
        lod1 = [[0, 3, 5], [0, 1, 3, 4, 5]]
        axis = 0
        level = 0
        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
        self.attrs = {'axis': axis, 'level': level}
        outs = []
        for i in range(2):
            sub_x0 = x0[lod0[level][i]:lod0[level][i + 1], :]
            sub_x1 = x1[lod1[level][i]:lod1[level][i + 1], :]
            outs.append(np.concatenate((sub_x0, sub_x1), axis=axis))
        self.outputs = {'Out': np.concatenate(outs, axis=0)}


if __name__ == '__main__':
    unittest.main()
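The heart of the reference computation above is slicing each input by its LoD offsets and concatenating the matching sub-sequences. A standalone sketch of that step for one index (the shapes and values are made up):

.. code-block:: python

    import numpy as np

    x0 = np.arange(8).reshape(4, 2).astype('float32')   # offsets [0, 1, 2, 3, 4]
    x1 = np.arange(10).reshape(5, 2).astype('float32')  # offsets [0, 1, 2, 3, 5]
    lod0 = [0, 1, 2, 3, 4]
    lod1 = [0, 1, 2, 3, 5]
    i = 3                                               # the fourth sub-sequence
    sub_x0 = x0[lod0[i]:lod0[i + 1], :]                 # 1 row from x0
    sub_x1 = x1[lod1[i]:lod1[i + 1], :]                 # 2 rows from x1
    merged = np.concatenate((sub_x0, sub_x1), axis=0)   # x0's rows first, then x1's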