Unverified · Commit 19fd0717, authored by qingqing01, committed by GitHub

Make the normalization operator more general and fix a bug in l2_normalize. (#11348)

* Add normalization operator.
1. Refine the raw norm_op to make it general enough to normalize a Tensor along any axis.
2. Fix a bug in the l2_normalize API, which lacked a sqrt after `reduce_sum` (see the sketch below).
3. Reimplement the l2_normalize API on top of norm_op.
4. Fix a bug in test_normalization_wrapper.py.
Parent f15504e5
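For context, a small NumPy sketch of the intended computation; l2_normalize_ref is an illustrative reference, not the Paddle API. The old layer effectively divided by sum(x**2) without the sqrt, which is the bug described in item 2.

import numpy as np

def l2_normalize_ref(x, axis, epsilon=1e-10):
    # Correct form: divide by sqrt(sum(x^2) + epsilon). The previous layer
    # skipped the sqrt after reduce_sum, so outputs were scaled incorrectly.
    norm = np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True) + epsilon)
    return x / norm

x = np.random.rand(2, 3, 4).astype("float32")
y = l2_normalize_ref(x, axis=1)
print(np.linalg.norm(y, axis=1))  # each slice along axis=1 is ~1.0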
@@ -16,40 +16,34 @@ limitations under the License. */

namespace paddle {
namespace operators {

class NormOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor) A tensor of rank >= axis.");
    AddAttr<int>("axis",
                 "The axis on which to apply normalization. If axis < 0, "
                 "the dimension to normalize is rank(X) + axis. -1 is "
                 "the last dimension.");
    AddAttr<float>("epsilon",
                   "(float, default 1e-10) The epsilon value is used "
                   "to avoid division by zero.")
        .SetDefault(1.0e-10f);
    AddOutput("Norm",
              "(Tensor) A tensor that saves `sqrt(sum(x^2) + epsilon)`; it "
              "is reused by the backward kernel.")
        .AsIntermediate();
    AddOutput("Out", "(Tensor) A tensor of the same shape as X.");
    AddComment(R"DOC(

Given a tensor, this operator applies L2 normalization along the provided axis:

$$
y = \frac{x}{\sqrt{\sum {x^2} + epsilon}}
$$

where $\sum {x^2}$ is computed along the `axis` dimension.

)DOC");
  }
};
@@ -58,15 +52,15 @@ class NormOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of NormOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of NormOp should not be null.");
    auto xdim = ctx->GetInputDim("X");
    ctx->SetOutputDim("Out", xdim);
    int axis = ctx->Attrs().Get<int>("axis");
    if (axis < 0) axis = xdim.size() + axis;
    xdim[axis] = 1;
    ctx->SetOutputDim("Norm", xdim);
  }
};
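A quick illustration of the shapes this InferShape produces (a minimal sketch; the values are only an example):

# For an input X of shape [2, 3, 4, 4] and axis=1, Out keeps X's shape and
# Norm has the normalized axis reduced to 1.
xdim = [2, 3, 4, 4]
axis = 1
if axis < 0:
    axis = len(xdim) + axis
out_dim = list(xdim)      # [2, 3, 4, 4]
norm_dim = list(xdim)
norm_dim[axis] = 1        # [2, 1, 4, 4]
print(out_dim, norm_dim)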
@@ -84,12 +78,12 @@ class NormOpGrad : public framework::OperatorWithKernel {
}  // namespace paddle

namespace ops = paddle::operators;
using CPU = paddle::platform::CPUDeviceContext;

REGISTER_OPERATOR(norm, ops::NormOp, ops::NormOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(norm_grad, ops::NormOpGrad);
REGISTER_OP_CPU_KERNEL(norm, ops::NormKernel<CPU, float>,
                       ops::NormKernel<CPU, double>);
REGISTER_OP_CPU_KERNEL(norm_grad, ops::NormGradKernel<CPU, float>,
                       ops::NormGradKernel<CPU, double>);
@@ -16,9 +16,9 @@ limitations under the License. */
#include "paddle/fluid/operators/norm_op.h"

namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;

REGISTER_OP_CUDA_KERNEL(norm, ops::NormKernel<CUDA, float>,
                        ops::NormKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(norm_grad, ops::NormGradKernel<CUDA, float>,
                        ops::NormGradKernel<CUDA, double>);
@@ -19,156 +19,110 @@ limitations under the License. */

namespace paddle {
namespace operators {

inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n,
                    int* post) {
  *pre = 1;
  *post = 1;
  *n = dim[axis];
  for (int i = 0; i < axis; ++i) {
    (*pre) *= dim[i];
  }
  for (int i = axis + 1; i < dim.size(); ++i) {
    (*post) *= dim[i];
  }
}

template <typename DeviceContext, typename T>
class NormKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in_x = ctx.Input<framework::Tensor>("X");
    auto* out_y = ctx.Output<framework::Tensor>("Out");
    auto* out_norm = ctx.Output<framework::Tensor>("Norm");
    out_y->mutable_data<T>(ctx.GetPlace());
    out_norm->mutable_data<T>(ctx.GetPlace());

    auto xdim = in_x->dims();
    T eps = static_cast<T>(ctx.Attr<float>("epsilon"));
    int axis = ctx.Attr<int>("axis");
    if (axis < 0) axis = xdim.size() + axis;
    int pre, n, post;
    GetDims(xdim, axis, &pre, &n, &post);

    auto* place = ctx.template device_context<DeviceContext>().eigen_device();

    Eigen::DSizes<int, 3> shape(pre, n, post);
    Eigen::DSizes<int, 2> norm_shape(pre, post);

    auto x_e = framework::EigenVector<T>::Flatten(*in_x);
    auto y_e = framework::EigenVector<T>::Flatten(*out_y);
    auto norm_e = framework::EigenVector<T>::Flatten(*out_norm);
    auto x = x_e.reshape(shape);
    auto y = y_e.reshape(shape);
    auto norm = norm_e.reshape(norm_shape);

    Eigen::DSizes<int, 1> rdim(1);
    // norm = sqrt(sum(x * x) + epsilon)
    auto sum = x.pow(2).sum(rdim) + eps;
    norm.device(*place) = sum.sqrt();
    // y = x / norm
    Eigen::DSizes<int, 3> rshape(pre, 1, post);
    Eigen::DSizes<int, 3> bcast(1, n, 1);
    y.device(*place) = x / norm.reshape(rshape).broadcast(bcast);
  }
};

template <typename DeviceContext, typename T, typename AttrType = T>
class NormGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in_x = ctx.Input<framework::Tensor>("X");
    auto* in_norm = ctx.Input<framework::Tensor>("Norm");
    auto* in_dy = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* out_dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
    out_dx->mutable_data<T>(ctx.GetPlace());

    auto xdim = in_x->dims();
    int axis = ctx.Attr<int>("axis");
    if (axis < 0) axis = xdim.size() + axis;
    int pre, n, post;
    GetDims(xdim, axis, &pre, &n, &post);

    auto* place = ctx.template device_context<DeviceContext>().eigen_device();

    auto x_e = framework::EigenVector<T>::Flatten(*in_x);
    auto dy_e = framework::EigenVector<T>::Flatten(*in_dy);
    auto norm_e = framework::EigenVector<T>::Flatten(*in_norm);
    auto dx_e = framework::EigenVector<T>::Flatten(*out_dx);

    Eigen::DSizes<int, 3> shape(pre, n, post);
    Eigen::DSizes<int, 2> norm_shape(pre, post);
    auto x = x_e.reshape(shape);
    auto dy = dy_e.reshape(shape);
    auto norm = norm_e.reshape(norm_shape);
    auto dx = dx_e.reshape(shape);

    framework::Tensor rsum;
    rsum.mutable_data<T>({pre, post}, ctx.GetPlace());
    auto sum = framework::EigenTensor<T, 2>::From(rsum);

    Eigen::DSizes<int, 1> rdim(1);
    Eigen::DSizes<int, 3> bcast(1, n, 1);
    Eigen::DSizes<int, 3> rshape(pre, 1, post);

    // dx = [dy - x * sum(x * dy) / (sum(x * x) + e)] / sqrt(sum(x * x) + e)
    // 1. sum = sum(x * dy)
    sum.device(*place) = (x * dy).sum(rdim);
    // 2. dx = x * sum
    dx.device(*place) = sum.reshape(rshape).broadcast(bcast) * x;
    // 3. dx = dx / (sum(x * x) + e)
    //    where norm.pow(2) = sum(x * x) + e, computed in the forward pass.
    dx.device(*place) = dx / norm.pow(2).broadcast(bcast);
    // 4. dx = (dy - dx) / sqrt(sum(x * x) + e)
    dx.device(*place) = (dy - dx) / norm.broadcast(bcast);
  }
};

}  // namespace operators
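To make the (pre, n, post) view used by GetDims and the gradient formula in the comments concrete, here is a minimal NumPy sketch. get_dims, norm_forward, and norm_backward are illustrative names, not Paddle APIs, and the check at the end is only a rough finite-difference sanity test.

import numpy as np

def get_dims(shape, axis):
    # Mirror of GetDims: fold the dimensions before `axis` into pre and those
    # after it into post; the normalized axis itself is n.
    if axis < 0:
        axis = len(shape) + axis
    pre = int(np.prod(shape[:axis], dtype=np.int64))
    n = int(shape[axis])
    post = int(np.prod(shape[axis + 1:], dtype=np.int64))
    return pre, n, post

def norm_forward(x, axis, eps=1e-10):
    pre, n, post = get_dims(x.shape, axis)
    x3 = x.reshape(pre, n, post)
    norm = np.sqrt(np.sum(x3 * x3, axis=1, keepdims=True) + eps)  # (pre, 1, post)
    return (x3 / norm).reshape(x.shape), norm

def norm_backward(x, dy, axis, eps=1e-10):
    # dx = [dy - x * sum(x * dy) / (sum(x * x) + eps)] / sqrt(sum(x * x) + eps)
    pre, n, post = get_dims(x.shape, axis)
    x3 = x.reshape(pre, n, post)
    dy3 = dy.reshape(pre, n, post)
    norm = np.sqrt(np.sum(x3 * x3, axis=1, keepdims=True) + eps)
    s = np.sum(x3 * dy3, axis=1, keepdims=True)
    dx3 = (dy3 - x3 * s / (norm * norm)) / norm
    return dx3.reshape(x.shape)

# Rough finite-difference check of the backward formula on a single element.
x = np.random.rand(2, 3, 4)
dy = np.random.rand(2, 3, 4)
dx = norm_backward(x, dy, axis=1)
h = 1e-6
xp = x.copy()
xp[0, 1, 2] += h
numeric = np.sum((norm_forward(xp, 1)[0] - norm_forward(x, 1)[0]) * dy) / h
print(abs(numeric - dx[0, 1, 2]) < 1e-4)  # expected: True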
@@ -2467,19 +2467,21 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
    The l2 normalize layer normalizes `x` along dimension `axis` using an L2
    norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes

    .. math::

        y = \frac{x}{\sqrt{\sum x^2 + epsilon}}

    For `x` with more dimensions, this layer independently normalizes each 1-D
    slice along dimension `axis`.

    Args:
        x(Variable|list): The input tensor to the l2_normalize layer.
        axis(int): The axis on which to apply normalization. If `axis < 0`,
            the dimension to normalize is rank(X) + axis. -1 is the last
            dimension.
        epsilon(float): The epsilon value used to avoid division by zero;
            defaults to 1e-12.
        name(str|None): A name for this layer (optional). If set to None, the
            layer will be named automatically.

    Returns:
@@ -2498,46 +2500,17 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
        axis = 0
    helper = LayerHelper("l2_normalize", **locals())

    out = helper.create_tmp_variable(dtype=x.dtype)
    norm = helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type="norm",
        inputs={"X": x},
        outputs={"Out": out,
                 "Norm": norm},
        attrs={
            "axis": 1 if axis is None else axis,
            "epsilon": epsilon,
        })
    return out
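A minimal usage sketch of the refactored API, assuming the Fluid layers API of this codebase; the shapes are illustrative:

import paddle.fluid as fluid

# Normalize 32x32 feature maps across the channel axis. A single norm op is
# now appended instead of the former square/reduce_sum/reciprocal/expand/
# elementwise_mul chain.
data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
normed = fluid.layers.l2_normalize(x=data, axis=1)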
@@ -387,6 +387,12 @@ class TestBook(unittest.TestCase):
        self.assertIsNotNone(output)
        print(str(program))

    def test_l2_normalize(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[8, 7, 10], dtype="float32")
            output = layers.l2_normalize(x, axis=1)

    def test_maxout(self):
        program = Program()
        with program_guard(program):
@@ -17,44 +17,23 @@ import numpy as np
from op_test import OpTest


def l2_norm(x, axis, epsilon):
    x2 = x**2
    s = np.sum(x2, axis=axis, keepdims=True)
    r = np.sqrt(s + epsilon)
    y = x / np.broadcast_to(r, x.shape)
    return y, r
class TestNormOp(OpTest):
    def setUp(self):
        self.op_type = "norm"
        self.init_test_case()
        x = np.random.random(self.shape).astype("float64")
        y, norm = l2_norm(x, self.axis, self.epsilon)
        self.inputs = {'X': x}
        self.attrs = {'epsilon': self.epsilon, 'axis': self.axis}
        self.outputs = {'Out': y, 'Norm': norm}

    def test_check_output(self):
        self.check_output()
@@ -63,8 +42,23 @@ class TestNormOp(OpTest):
        self.check_grad(['X'], 'Out')

    def init_test_case(self):
        self.shape = [2, 3, 4, 4]
        self.axis = 1
        self.epsilon = 1e-8


class TestNormOp2(TestNormOp):
    def init_test_case(self):
        self.shape = [5, 3, 9, 7]
        self.axis = 0
        self.epsilon = 1e-8


class TestNormOp3(TestNormOp):
    def init_test_case(self):
        self.shape = [5, 3, 2, 7]
        self.axis = -1
        self.epsilon = 1e-8


if __name__ == '__main__':
@@ -70,8 +70,9 @@ class TestNormalization(unittest.TestCase):
    def l2_normalize(self, data, axis, epsilon):
        """Compute the ground truth."""
        output = data / np.broadcast_to(
            np.sqrt(np.sum(np.square(data), axis=axis, keepdims=True)),
            data.shape)
        return output

    def test_l2_normalize(self):