Unverified commit 576c740d, authored by colourful-tree, committed by GitHub

Merge pull request #14964 from colourful-tree/data_norm

add data norm op
......@@ -88,6 +88,7 @@ paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'poo
paddle.fluid.layers.adaptive_pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None))
paddle.fluid.layers.adaptive_pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None))
paddle.fluid.layers.batch_norm ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False, False))
paddle.fluid.layers.data_norm ArgSpec(args=['input', 'act', 'epsilon', 'param_attr', 'data_layout', 'in_place', 'use_mkldnn', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var'], varargs=None, keywords=None, defaults=(None, 1e-05, None, 'NCHW', False, False, None, None, None, False))
paddle.fluid.layers.beam_search_decode ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.conv2d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None))
paddle.fluid.layers.conv3d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None))
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/data_norm_op.h"
#include <string>
#include "paddle/fluid/framework/data_layout.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DataLayout = framework::DataLayout;
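// The Eigen::Map aliases below view raw tensor buffers as Eigen arrays,
// letting the kernels do vectorized per-feature arithmetic without copies.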
template <typename T>
using EigenArrayMap =
Eigen::Map<Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>>;
template <typename T>
using ConstEigenArrayMap =
Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>>;
template <typename T>
using EigenVectorArrayMap = Eigen::Map<Eigen::Array<T, Eigen::Dynamic, 1>>;
template <typename T>
using ConstEigenVectorArrayMap =
Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, 1>>;
class DataNormOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "");
PADDLE_ENFORCE(ctx->HasInput("BatchSize"), "");
PADDLE_ENFORCE(ctx->HasInput("BatchSum"), "");
PADDLE_ENFORCE(ctx->HasInput("BatchSquareSum"), "");
PADDLE_ENFORCE(ctx->HasOutput("Means"), "");
PADDLE_ENFORCE(ctx->HasOutput("Scales"), "");
PADDLE_ENFORCE(ctx->HasOutput("Y"), "");
const auto x_dims = ctx->GetInputDim("X");
const DataLayout data_layout = framework::StringToDataLayout(
ctx->Attrs().Get<std::string>("data_layout"));
PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
"Input X must have 2 to 5 dimensions.");
const int64_t C =
(data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize").size(), 1UL);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum").size(), 1UL);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum").size(), 1UL);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C);
ctx->SetOutputDim("Y", x_dims);
ctx->SetOutputDim("Means", {C});
ctx->SetOutputDim("Scales", {C});
ctx->ShareLoD("X", "Y");
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
auto input_data_type = ctx.Input<Tensor>("X")->type();
    // By default, the statistics inputs (BatchSize, BatchSum, BatchSquareSum)
    // should be float (for float or float16 input tensors) or double
    // (for double input tensors).
auto dn_param_type = framework::proto::VarType::FP32;
if (input_data_type == framework::proto::VarType::FP64) {
dn_param_type = framework::proto::VarType::FP64;
}
PADDLE_ENFORCE_EQ(dn_param_type, ctx.Input<Tensor>("BatchSize")->type(),
"BatchSize input should be of float type");
PADDLE_ENFORCE_EQ(dn_param_type, ctx.Input<Tensor>("BatchSum")->type(),
"BatchSum input should be of float type");
PADDLE_ENFORCE_EQ(dn_param_type,
ctx.Input<Tensor>("BatchSquareSum")->type(),
"BatchSquareSum input should be of float type");
// TODO(pzelazko-intel): enable MKLDNN layout when it's ready
framework::LibraryType library = framework::LibraryType::kPlain;
framework::DataLayout layout = framework::DataLayout::kAnyLayout;
#ifdef PADDLE_WITH_MKLDNN
if (library == framework::LibraryType::kPlain &&
platform::CanMKLDNNBeUsed(ctx)) {
library = framework::LibraryType::kMKLDNN;
layout = framework::DataLayout::kMKLDNN;
}
#endif
return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
library);
}
};
class DataNormOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
// AddAttr<bool>("is_test", "").SetDefault(false);
AddAttr<float>("epsilon", "")
.SetDefault(1e-4)
.AddCustomChecker([](const float &epsilon) {
PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f,
"'epsilon' should be between 0.0 and 0.001.");
});
AddAttr<std::string>("data_layout", "").SetDefault("NCHW");
AddInput("X", "The input tensor");
AddInput("BatchSize",
"BatchSize is a 1-dimensional tensor of size C "
"that is applied to the output");
AddInput("BatchSum",
"BatchSum is a 1-dimensional tensor of size C "
"that is applied to the output");
AddInput("BatchSquareSum",
"The global BatchSquareSum (for training) or "
"estimated BatchSquareSum (for testing)");
AddOutput("Y", "result after normalization");
AddOutput("Means",
"Mean of the history data batch, "
"will apply to output when training")
.AsIntermediate();
AddOutput("Scales",
"Scales of the history data batch, "
"will apply to output when training")
.AsIntermediate();
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
AddComment(R"DOC(
Data Normalization.
Can be used as a normalizer function for data
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
)DOC");
}
};
template <typename T>
class DataNormKernel<platform::CPUDeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
// const bool is_test = ctx.Attr<bool>("is_test");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
PADDLE_ENFORCE(x_dims.size() == 2, "The Input dim size should be 2");
const int N = x_dims[0];
const int C =
(data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
auto *y = ctx.Output<Tensor>("Y");
auto *mean_out = ctx.Output<Tensor>("Means");
auto *scales = ctx.Output<Tensor>("Scales");
// alloc memory
y->mutable_data<T>(ctx.GetPlace());
Eigen::Array<T, Eigen::Dynamic, 1> inv_std(C);
ConstEigenVectorArrayMap<T> b_size_arr(
ctx.Input<Tensor>("BatchSize")->data<T>(), C);
ConstEigenVectorArrayMap<T> b_sum_arr(
ctx.Input<Tensor>("BatchSum")->data<T>(), C);
ConstEigenVectorArrayMap<T> b_square_sum_arr(
ctx.Input<Tensor>("BatchSquareSum")->data<T>(), C);
EigenVectorArrayMap<T> means_arr(mean_out->mutable_data<T>(ctx.GetPlace()),
C);
EigenVectorArrayMap<T> scales_arr(scales->mutable_data<T>(ctx.GetPlace()),
C);
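    // The running statistics fully determine the normalization:
    // mean = BatchSum / BatchSize, and since BatchSquareSum / BatchSize
    // approximates the per-feature variance, sqrt(BatchSize / BatchSquareSum)
    // is the inverse standard deviation used as the scale.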
means_arr = b_sum_arr / b_size_arr;
scales_arr = (b_size_arr / b_square_sum_arr).sqrt();
switch (data_layout) {
      case DataLayout::kNCHW:  // since the input is two-dimensional,
                               // NCHW and NHWC are equivalent here
case DataLayout::kNHWC: {
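        // View the row-major N x C input as a column-major C-by-N array so
        // each column is one sample; subtract the per-feature mean and
        // multiply by the per-feature inverse stddev.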
EigenArrayMap<T>(y->mutable_data<T>(ctx.GetPlace()), C, N) =
(ConstEigenArrayMap<T>(x->data<T>(), C, N).colwise() - means_arr)
.colwise() *
scales_arr;
break;
}
default:
PADDLE_THROW("Unknown storage order: %d", data_layout);
}
}
};
class DataNormGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
// check input
PADDLE_ENFORCE(ctx->HasInput("X"));
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), "");
PADDLE_ENFORCE(ctx->HasInput("BatchSize"), "");
PADDLE_ENFORCE(ctx->HasInput("BatchSum"), "");
PADDLE_ENFORCE(ctx->HasInput("BatchSquareSum"), "");
PADDLE_ENFORCE(ctx->HasInput("Means"), "");
PADDLE_ENFORCE(ctx->HasInput("Scales"), "");
// check output
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), "");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("BatchSize")), "");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("BatchSum")), "");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("BatchSquareSum")),
"");
const auto x_dims = ctx->GetInputDim("X");
const DataLayout data_layout = framework::StringToDataLayout(
ctx->Attrs().Get<std::string>("data_layout"));
const int C =
(data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
ctx->SetOutputDim(framework::GradVarName("BatchSize"), {C});
ctx->SetOutputDim(framework::GradVarName("BatchSum"), {C});
ctx->SetOutputDim(framework::GradVarName("BatchSquareSum"), {C});
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
const auto *var = ctx.InputVar(framework::GradVarName("Y"));
if (var == nullptr) {
PADDLE_THROW("can't find Y@GRAD");
}
const Tensor *t = nullptr;
if (var->IsType<Tensor>()) {
t = &var->Get<Tensor>();
} else if (var->IsType<LoDTensor>()) {
t = &var->Get<LoDTensor>();
}
if (t == nullptr) {
PADDLE_THROW("can't find Y@GRAD");
}
// TODO(pzelazko-intel): enable MKLDNN layout when it's ready
framework::LibraryType library = framework::LibraryType::kPlain;
framework::DataLayout layout = framework::DataLayout::kAnyLayout;
#ifdef PADDLE_WITH_MKLDNN
if (library == framework::LibraryType::kPlain &&
platform::CanMKLDNNBeUsed(ctx)) {
library = framework::LibraryType::kMKLDNN;
layout = framework::DataLayout::kMKLDNN;
}
#endif
return framework::OpKernelType(ctx.Input<Tensor>("X")->type(),
ctx.GetPlace(), layout, library);
}
};
template <typename T>
class DataNormGradKernel<platform::CPUDeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *batch_size = ctx.Input<Tensor>("BatchSize");
const auto *batch_sum = ctx.Input<Tensor>("BatchSum");
const auto *batch_square_sum = ctx.Input<Tensor>("BatchSquareSum");
const auto *scales = ctx.Input<Tensor>("Scales");
const auto *means = ctx.Input<Tensor>("Means");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
// Get the size for each dimension.
// NCHW [batch_size, in_channels, in_height, in_width]
const auto &x_dims = x->dims();
PADDLE_ENFORCE(x_dims.size() == 2, "The Input dim size should be 2");
const int N = x_dims[0];
const int C =
(data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
// init output
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_batch_size =
ctx.Output<Tensor>(framework::GradVarName("BatchSize"));
auto *d_batch_sum = ctx.Output<Tensor>(framework::GradVarName("BatchSum"));
auto *d_batch_square_sum =
ctx.Output<Tensor>(framework::GradVarName("BatchSquareSum"));
EigenVectorArrayMap<T> d_batch_size_arr(
d_batch_size->mutable_data<T>(ctx.GetPlace()), C);
EigenVectorArrayMap<T> d_batch_sum_arr(
d_batch_sum->mutable_data<T>(ctx.GetPlace()), C);
EigenVectorArrayMap<T> d_batch_square_sum_arr(
d_batch_square_sum->mutable_data<T>(ctx.GetPlace()), C);
d_batch_size_arr.setZero();
d_batch_sum_arr.setZero();
d_batch_square_sum_arr.setZero();
const float epsilon = ctx.Attr<float>("epsilon");
    switch (data_layout) {  // since the input is two-dimensional,
                            // NCHW and NHWC are equivalent here
case DataLayout::kNCHW:
case DataLayout::kNHWC: {
ConstEigenVectorArrayMap<T> scales_arr(scales->data<T>(), C);
ConstEigenVectorArrayMap<T> means_arr(means->data<T>(), C);
ConstEigenArrayMap<T> x_arr(x->data<T>(), C, N);
ConstEigenArrayMap<T> d_y_arr(d_y->data<T>(), C, N);
EigenArrayMap<T> d_x_arr(d_x->mutable_data<T>(ctx.GetPlace()), C, N);
d_x_arr.setZero();
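        // Since y = (x - mean) * scale and mean/scale are constants w.r.t. x
        // here, dL/dx is simply dL/dy * scale for every sample.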
for (int nc = 0; nc < N; ++nc) {
d_x_arr.col(nc) = d_y_arr.col(nc) * scales_arr;
}
        // map the running statistics inputs
ConstEigenVectorArrayMap<T> batch_size_arr(batch_size->data<T>(), C);
ConstEigenVectorArrayMap<T> batch_sum_arr(batch_sum->data<T>(), C);
ConstEigenVectorArrayMap<T> batch_square_sum_arr(
batch_square_sum->data<T>(), C);
Eigen::Array<T, Eigen::Dynamic, 1> sample_sum(C);
Eigen::Array<T, Eigen::Dynamic, 1> sample_square_sum(C);
// calculate data sample sum and square sum
sample_sum.setZero();
sample_square_sum.setZero();
for (int nc = 0; nc < N; ++nc) {
sample_sum += x_arr.col(nc);
sample_square_sum += (x_arr.col(nc) - means_arr).square();
}
// calculate gradient
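        // These "gradients" carry the batch statistics update: each sample
        // contributes 1 to the count, its value to the sum, and its squared
        // deviation (plus epsilon for stability) to the square sum, which an
        // optimizer can then fold into the statistic parameters.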
d_batch_size_arr.setConstant(N);
d_batch_sum_arr = sample_sum;
d_batch_square_sum_arr = sample_square_sum + d_batch_size_arr * epsilon;
break;
}
default:
PADDLE_THROW("Unknown storage order: %s", data_layout_str);
}
}
};
class DataNormGradMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
std::unique_ptr<framework::OpDesc> Apply() const override {
auto *op = new framework::OpDesc();
op->SetType("data_norm_grad");
op->SetInput("X", Input("X"));
op->SetInput(framework::GradVarName("Y"), OutputGrad("Y"));
op->SetInput("BatchSize", Input("BatchSize"));
op->SetInput("BatchSum", Input("BatchSum"));
op->SetInput("BatchSquareSum", Input("BatchSquareSum"));
op->SetInput("Scales", Output("Scales"));
op->SetInput("Means", Output("Means"));
op->SetAttrMap(Attrs());
op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
op->SetOutput(framework::GradVarName("BatchSize"), InputGrad("BatchSize"));
op->SetOutput(framework::GradVarName("BatchSum"), InputGrad("BatchSum"));
op->SetOutput(framework::GradVarName("BatchSquareSum"),
InputGrad("BatchSquareSum"));
return std::unique_ptr<framework::OpDesc>(op);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(data_norm, ops::DataNormOp, ops::DataNormOpMaker,
ops::DataNormGradMaker);
REGISTER_OPERATOR(data_norm_grad, ops::DataNormGradOp);
REGISTER_OP_CPU_KERNEL(
data_norm, ops::DataNormKernel<paddle::platform::CPUDeviceContext, float>,
ops::DataNormKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
data_norm_grad,
ops::DataNormGradKernel<paddle::platform::CPUDeviceContext, float>,
ops::DataNormGradKernel<paddle::platform::CPUDeviceContext, double>);
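For reference, the CPU forward kernel above amounts to three lines of array
arithmetic. The following NumPy sketch is an editorial illustration of that
math (the function and variable names are mine, not part of the PR):

import numpy as np

def data_norm_forward(x, batch_size, batch_sum, batch_square_sum):
    # Mirrors DataNormKernel<CPU>::Compute for a 2-D [N, C] input.
    means = batch_sum / batch_size                   # per-feature running mean
    scales = np.sqrt(batch_size / batch_square_sum)  # per-feature 1 / stddev
    return (x - means) * scales, means, scales       # broadcasts over rows

# Seed the statistics the way the Python layer does (1e4, 0.0, 1e4), so the
# initial output equals the input:
y, means, scales = data_norm_forward(
    np.random.rand(8, 3).astype(np.float32),
    np.full(3, 1e4, np.float32), np.zeros(3, np.float32),
    np.full(3, 1e4, np.float32))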
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename DeviceContext, typename T>
class DataNormKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override;
};
template <typename DeviceContext, typename T>
class DataNormGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override;
};
} // namespace operators
} // namespace paddle
......@@ -58,6 +58,7 @@ __all__ = [
'adaptive_pool2d',
'adaptive_pool3d',
'batch_norm',
'data_norm',
'beam_search_decode',
'conv2d_transpose',
'conv3d_transpose',
......@@ -2897,6 +2898,133 @@ def batch_norm(input,
return helper.append_activation(batch_norm_out)
def data_norm(input,
act=None,
epsilon=1e-05,
param_attr=None,
data_layout='NCHW',
in_place=False,
use_mkldnn=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=False):
"""
**Data Normalization Layer**
Can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
:math:`input` is the input features over a mini-batch.
    .. math::

        \\mu &\\gets \\frac{BatchSum}{BatchSize} \\qquad &//\ running\ mean \\\\
        \\sigma^{2} &\\gets \\frac{BatchSquareSum}{BatchSize} \\qquad &//\
        \ running\ variance \\\\
        y_i &\\gets \\frac{x_i - \\mu}{\\sqrt{\\sigma^{2}}} \\qquad &//\ normalize
Args:
input(variable): The input variable which is a LoDTensor.
act(string, Default None): Activation type, linear|relu|prelu|...
        epsilon(float, Default 1e-05): A small value added to the variance term
            for numerical stability.
param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
data_layout(string, default NCHW): NCHW|NHWC
        in_place(bool, Default False): Make the input and output of data norm reuse memory.
        use_mkldnn(bool, Default False): Whether to use the MKL-DNN kernel.
name(string, Default None): A name for this layer(optional). If set None, the layer
will be named automatically.
moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not.
Returns:
Variable: A tensor variable which is the result after applying data normalization on the input.
Examples:
        .. code-block:: python

            data = fluid.layers.data(name='data', shape=[200], dtype='float32')
            hidden = fluid.layers.data_norm(input=data)
"""
helper = LayerHelper('data_norm', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    elif data_layout == 'NHWC':
        channel_num = input_shape[-1]
    else:
        raise ValueError("unsupported data layout: " + data_layout)
param_shape = [channel_num]
batch_size_default = 1e4
batch_sum_default = 0.0
batch_square_sum_default = 1e4
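    # param_attr may be a plain dict that overrides these statistic seeds, e.g.
    # param_attr={"batch_size": 1e4, "batch_sum": 0.0, "batch_square": 1e4}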
if param_attr and isinstance(param_attr, dict):
batch_size_default = param_attr.get("batch_size", 1e4)
batch_sum_default = param_attr.get("batch_sum", 0.0)
batch_square_sum_default = param_attr.get("batch_square", 1e4)
# create parameter
    batch_size = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_size' if name else None,
            initializer=Constant(value=float(batch_size_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)
    batch_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_sum' if name else None,
            initializer=Constant(value=float(batch_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)
    batch_square_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_square_sum' if name else None,
            initializer=Constant(value=float(batch_square_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)
means = helper.create_variable(dtype=dtype, stop_gradient=True)
scales = helper.create_variable(dtype=dtype, stop_gradient=True)
data_norm_out = input if in_place else helper.create_variable(dtype=dtype)
helper.append_op(
type="data_norm",
inputs={
"X": input,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum
},
outputs={"Y": data_norm_out,
"Means": means,
"Scales": scales},
attrs={"epsilon": epsilon,
"use_mkldnn": use_mkldnn})
return helper.append_activation(data_norm_out)
@templatedoc()
def layer_norm(input,
scale=True,
......@@ -3065,9 +3193,9 @@ def group_norm(input,
inputs['Bias'] = bias
# create output
mean_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
variance_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
group_norm_out = helper.create_tmp_variable(dtype)
mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
group_norm_out = helper.create_variable(dtype)
helper.append_op(
type="group_norm",
......
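As an end-to-end check of the new layer, here is a small editorial sketch (not
part of the PR) that runs data_norm through an executor with the fluid API as
of this change:

import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[3], dtype='float32')
y = fluid.layers.data_norm(input=x, name='dn')  # 'dn' prefixes the dn.batch_* parameters

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())        # initializes the seeded statistics
out, = exe.run(feed={'x': np.random.rand(8, 3).astype('float32')},
               fetch_list=[y])
print(out.shape)  # (8, 3)

With the default seeds (BatchSize = BatchSquareSum = 1e4, BatchSum = 0), the
first forward pass returns the input unchanged; the statistics only move once
the *@GRAD outputs are applied to the parameters by an optimizer.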