Unverified commit 8daccc9e, authored by ceci3, committed by GitHub

Fix batch norm double grad compute (#27549)

* fix bn double grad, test=develop

* update, test=develop
Parent: c143326d
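Reviewer note: with `use_global_stats=True`, batch_norm normalizes with the running statistics rather than the per-mini-batch ones, so every double-grad term involving `(x - mean)` needs the running `Mean` tensor, which the double-grad op previously never received as an input. A minimal numpy sketch of the forward pass this branch differentiates (illustrative helper, not PaddlePaddle API):

```python
import numpy as np

def bn_forward_global_stats(x, scale, bias, running_mean, running_var, eps=1e-5):
    # With use_global_stats, normalization uses the running (global) mean and
    # variance -- the "Mean" and "Variance" inputs this commit wires into the
    # double-grad op below.
    inv_std = 1.0 / np.sqrt(running_var + eps)
    return scale * (x - running_mean) * inv_std + bias
```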
@@ -839,6 +839,7 @@ void BatchNormDoubleGradMaker<T>::Apply(GradOpPtr<T> op) const {
   op->SetInput("SavedMean", this->Input("SavedMean"));
   op->SetInput("SavedVariance", this->Input("SavedVariance"));
   if (BOOST_GET_CONST(bool, this->GetAttr("use_global_stats"))) {
+    op->SetInput("Mean", this->Input("Mean"));
     op->SetInput("Variance", this->Input("Variance"));
   }
   op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
@@ -868,14 +869,19 @@ void BatchNormDoubleGradOp::InferShape(
                    "BatchNormDoubleGrad");
   }

-  OP_INOUT_CHECK(ctx->HasInput("DDX"), "Input", "DDX", "BatchNormDoubleGrad");
   OP_INOUT_CHECK(ctx->HasInput("DY"), "Input", "DY", "BatchNormDoubleGrad");

   // check output
   OP_INOUT_CHECK(ctx->HasOutput("DX"), "Output", "DX", "BatchNormDoubleGrad");

   const auto x_dims = ctx->GetInputDim("X");
-  const int C = x_dims[1];
+  const DataLayout data_layout = framework::StringToDataLayout(
+      ctx->Attrs().Get<std::string>("data_layout"));
+  const int C =
+      ((this->IsMKLDNNType() == true) || (data_layout == DataLayout::kNCHW)
+           ? x_dims[1]
+           : x_dims[x_dims.size() - 1]);
   if (ctx->HasOutput("DX")) {
     ctx->SetOutputDim("DX", x_dims);
   }
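Reviewer note: the old `InferShape` hard-coded `C = x_dims[1]`, which is only correct for NCHW. A small sketch of the layout rule the new ternary implements (function name is illustrative):

```python
def channel_count(dims, data_layout, is_mkldnn=False):
    # MKL-DNN kernels keep tensors in NCHW order regardless of the layout
    # attribute, so the channel axis is 1; plain NHWC puts channels last.
    return dims[1] if (is_mkldnn or data_layout == "NCHW") else dims[-1]

assert channel_count([2, 3, 4, 5], "NCHW") == 3
assert channel_count([2, 4, 5, 3], "NHWC") == 3
```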
@@ -957,7 +963,9 @@ class BatchNormDoubleGradKernel<platform::CPUDeviceContext, T>
     Tensor inv_var_tensor;
     if (use_global_stats) {
+      const auto *running_mean = ctx.Input<Tensor>("Mean");
       const auto *running_variance = ctx.Input<Tensor>("Variance");
+      mean_data = running_mean->data<T>();
       inv_var_tensor.Resize({C});
       T *running_inv_var_data = inv_var_tensor.mutable_data<T>(ctx.GetPlace());
@@ -1077,12 +1085,12 @@ class BatchNormDoubleGradKernel<platform::CPUDeviceContext, T>
     //          (np.mean(dy, axis=(n,h,w)) - dy) + inv_var.pow(3) / NxHxW *
     //          np.sum(dy,
     //          axis=(n,h,w)) * (x - mean) *
-    //          (np.mean(ddx, axis=(n,h,w)) - ddx) + ddr * (dy * inv_var -
+    //          (np.mean(ddx, axis=(n,h,w)) - ddx)) + ddr * (dy * inv_var -
     //          inv_var
     //          *
     //          np.mean(dy, axis=(n,h,w)) -
     //          inv_var.pow(3) * (x - mean) * np.mean(dy * (x - mean),
-    //          axis=(n,h,w))))
+    //          axis=(n,h,w)))
     if (ddX) {
       dx_arr +=
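Reviewer note: this comment edit (and the matching ones in the instance-norm kernel and the CUDA header below) only moves a closing parenthesis, but it changes which factor the trailing term belongs to. Reconstructed from the fragment shown here, with an overbar denoting the mean over `(n,h,w)` and `iv = inv_var`, the corrected reading is that the `ddr` (ddScale) contribution stands outside the `scale * (...)` factor:

$$dx \mathrel{+}= scale \cdot (\,\cdots\,) + ddr \, \bigl( dy \cdot iv - iv \cdot \overline{dy} - iv^{3} \, (x-\mu) \, \overline{dy\,(x-\mu)} \bigr)$$

which is also the term the one-line `var_val * var_val * ... * var_val` CUDA fix further down computes.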
@@ -1176,7 +1184,8 @@ class BatchNormDoubleGradKernel<platform::CPUDeviceContext, T>
                          C, sample_size);
     ddy_arr.setZero();
     if (use_global_stats) {
-      // math: ddy = r * ddx * inv_var
+      // math: ddy = r * ddx * inv_var + ddbias +
+      //             ddscale * (x - mean) * inv_var
       if (ddX) {
         ddy_arr = scale_tile_data * ddx_arr * inv_var_tile_data;
       }
@@ -1196,25 +1205,29 @@ class BatchNormDoubleGradKernel<platform::CPUDeviceContext, T>
                          .replicate(1, sample_size) /
                      sample_size);
       }
-      if (ddScale && ddBias) {
-        ConstEigenVectorArrayMap<T> ddscale_arr(ddScale->data<T>(), C);
-        Tensor ddscale_tile;
-        ddscale_tile.Resize({C, sample_size});
-        EigenArrayMap<T> ddscale_tile_data(
-            ddscale_tile.mutable_data<T>(ctx.GetPlace()), C, sample_size);
-        ddscale_tile_data = ddscale_arr.replicate(1, sample_size);
-
-        ConstEigenVectorArrayMap<T> ddbias_arr(ddBias->data<T>(), C);
-        Tensor ddbias_tile;
-        ddbias_tile.Resize({C, sample_size});
-        EigenArrayMap<T> ddbias_tile_data(
-            ddbias_tile.mutable_data<T>(ctx.GetPlace()), C, sample_size);
-        ddbias_tile_data = ddbias_arr.replicate(1, sample_size);
-
-        ddy_arr += x_sub_mean_mul_invstd_arr * ddscale_tile_data;
-        ddy_arr += ddbias_tile_data;
-      }
+    }
+    if (ddScale) {
+      ConstEigenVectorArrayMap<T> ddscale_arr(ddScale->data<T>(), C);
+      Tensor ddscale_tile;
+      ddscale_tile.Resize({C, sample_size});
+      EigenArrayMap<T> ddscale_tile_data(
+          ddscale_tile.mutable_data<T>(ctx.GetPlace()), C, sample_size);
+      ddscale_tile_data = ddscale_arr.replicate(1, sample_size);
+      ddy_arr += x_sub_mean_mul_invstd_arr * ddscale_tile_data;
+    }
+    if (ddBias) {
+      ConstEigenVectorArrayMap<T> ddbias_arr(ddBias->data<T>(), C);
+      Tensor ddbias_tile;
+      ddbias_tile.Resize({C, sample_size});
+      EigenArrayMap<T> ddbias_tile_data(
+          ddbias_tile.mutable_data<T>(ctx.GetPlace()), C, sample_size);
+      ddbias_tile_data = ddbias_arr.replicate(1, sample_size);
+      ddy_arr += ddbias_tile_data;
+    }
     }
     if (data_layout == DataLayout::kNCHW) {
       VLOG(3) << "Transform batchnorm output from NHWC to NCHW";
       TransToChannelFirst<paddle::platform::CPUDeviceContext, T>(
......
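Reviewer note: the restructured CPU branch above fixes two things at once: under `use_global_stats` the `ddY` output now picks up the `ddScale` and `ddBias` terms (the old comment `ddy = r * ddx * inv_var` was also incomplete), and each term is applied independently instead of only when both `ddScale` and `ddBias` happen to be present. A hedged numpy sketch of the resulting math, collapsed to a single channel:

```python
import numpy as np

def ddy_global_stats(x, ddx, scale, ddscale, ddbias, mean, inv_var):
    # ddy = scale * ddx * inv_var + ddbias + ddscale * (x - mean) * inv_var,
    # each term contributed only when the corresponding second-order input
    # exists (any of ddx / ddscale / ddbias may be None).
    ddy = np.zeros_like(x)
    if ddx is not None:
        ddy += scale * ddx * inv_var
    if ddscale is not None:
        ddy += ddscale * (x - mean) * inv_var
    if ddbias is not None:
        ddy += ddbias
    return ddy
```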
@@ -520,11 +520,11 @@ class InstanceNormDoubleGradKernel<platform::CPUDeviceContext, T>
     //          (np.mean(dy, axis=(h,w)) - dy) + inv_var.pow(3) / HxW *
     //          np.sum(dy,
     //          axis=(h,w)) * (x - mean) *
-    //          (np.mean(ddx, axis=(h,w)) - ddx) + ddr * (dy * inv_var - inv_var
-    //          *
+    //          (np.mean(ddx, axis=(h,w)) - ddx)) + ddr * (dy * inv_var -
+    //          inv_var *
     //          np.mean(dy, axis=(h,w)) -
     //          inv_var.pow(3) * (x - mean) * np.mean(dy * (x - mean),
-    //          axis=(h,w))))
+    //          axis=(h,w)))
     Tensor x_sub_mean_mul_invstd;
     x_sub_mean_mul_invstd.Resize({sample_size, NxC});
......
@@ -40,12 +40,12 @@ using DataLayout = framework::DataLayout;
 //          (np.mean(dy, axis=(n,h,w)) - dy) + inv_var.pow(3) / NxHxW *
 //          np.sum(dy,
 //          axis=(n,h,w)) * (x - mean) *
-//          (np.mean(ddx, axis=(n,h,w)) - ddx) + ddr * (dy * inv_var -
+//          (np.mean(ddx, axis=(n,h,w)) - ddx)) + ddr * (dy * inv_var -
 //          inv_var
 //          *
 //          np.mean(dy, axis=(n,h,w)) -
 //          inv_var.pow(3) * (x - mean) * np.mean(dy * (x - mean),
-//          axis=(n,h,w))))
+//          axis=(n,h,w)))
 template <typename T, int BlockDim, framework::DataLayout layout>
 __global__ void DoubleGradComputeDX(const T *x, const T *mean,
@@ -138,7 +138,7 @@ __global__ void DoubleGradComputeDX(const T *x, const T *mean,
             ? (j / sample_size * C + i) * sample_size + j % sample_size
             : j * outer_size + i;
     dx[index] += (dy[index] * var_val - dy_sum_val / inner_size * var_val -
-                  (x[index] - mean_val) * var_val *
+                  (x[index] - mean_val) * var_val * var_val *
                   dy_mul_x_sub_mean_sum_val * var_val / inner_size) *
                  ddscale[i];
   }
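Reviewer note: a quick numpy cross-check of the one-line fix above. The `(x - mean)` term of the `ddscale` contribution to `dx` needs `inv_var` cubed (`var_val * var_val * ... * var_val` once the trailing factor is counted), matching the corrected comment at the top of this file. A self-contained sketch for one channel:

```python
import numpy as np

np.random.seed(0)
x = np.random.randn(64)   # one channel, inner_size = 64
dy = np.random.randn(64)
ddscale = 0.7
mean = x.mean()
inv_var = 1.0 / np.sqrt(x.var() + 1e-5)

# Mirrors: dx[i] += (dy[i]*iv - mean(dy)*iv
#                    - (x[i]-mean)*iv*iv * mean(dy*(x-mean))*iv) * ddscale
dx = ddscale * (dy * inv_var
                - dy.mean() * inv_var
                - (x - mean) * inv_var ** 3 * (dy * (x - mean)).mean())
```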
@@ -326,19 +326,57 @@ __global__ void DoubleGradComputeDScaleWithGlobal(
 }

 // math: dx = ddscale * dy * inv_var
-// math: ddy = scale * ddx * inv_var
 template <typename T, framework::DataLayout layout>
-__global__ void DoubleGradComputeDataWithGlobal(
-    const T *dy, const T *scale, const T *variance, const double epsilon,
-    const int C, const int sample_size, const int num, T *dx) {
+__global__ void DoubleGradComputeDXWithGlobal(const T *dy, const T *ddscale,
+                                              const T *variance,
+                                              const double epsilon, const int C,
+                                              const int sample_size,
+                                              const int num, T *dx) {
   int gid = blockIdx.x * blockDim.x + threadIdx.x;
   int stride = blockDim.x * gridDim.x;
-  if (scale != nullptr) {
+  if (ddscale != nullptr) {
     for (int i = gid; i < num; i += stride) {
       const int c =
           layout == framework::DataLayout::kNCHW ? i / sample_size % C : i % C;
       T inv_var = 1.0 / sqrt(variance[c] + epsilon);
-      dx[i] = dy[i] * scale[c] * inv_var;
+      dx[i] = dy[i] * ddscale[c] * inv_var;
+    }
+  }
+}
+
+// math: ddy = scale * ddx * inv_var + ddbias +
+//             ddscale * (x - mean) * inv_var
+template <typename T, framework::DataLayout layout>
+__global__ void DoubleGradComputeDDYWithGlobal(
+    const T *ddx, const T *scale, const T *mean, const T *variance, const T *x,
+    const T *ddbias, const T *ddscale, const double epsilon, const int C,
+    const int sample_size, const int num, T *ddy) {
+  int gid = blockIdx.x * blockDim.x + threadIdx.x;
+  int stride = blockDim.x * gridDim.x;
+  if (ddx != nullptr) {
+    for (int i = gid; i < num; i += stride) {
+      const int c =
+          layout == framework::DataLayout::kNCHW ? i / sample_size % C : i % C;
+      T inv_var = 1.0 / sqrt(variance[c] + epsilon);
+      ddy[i] += ddx[i] * scale[c] * inv_var;
+    }
+  }
+  __syncthreads();
+  if (ddscale != nullptr) {
+    for (int i = gid; i < num; i += stride) {
+      const int c =
+          layout == framework::DataLayout::kNCHW ? i / sample_size % C : i % C;
+      T inv_var = 1.0 / sqrt(variance[c] + epsilon);
+      ddy[i] += (x[i] - mean[c]) * inv_var * ddscale[c];
+    }
+  }
+  __syncthreads();
+  if (ddbias != nullptr) {
+    for (int i = gid; i < num; i += stride) {
+      const int c =
+          layout == framework::DataLayout::kNCHW ? i / sample_size % C : i % C;
+      ddy[i] += ddbias[c];
     }
   }
 }
@@ -383,8 +421,11 @@ void NormDoubleGradFunctor(const framework::ExecutionContext &ctx,
   const T *mean_data, *variance_data;
   if (use_global_stats) {
+    const auto *running_mean = ctx.Input<Tensor>("Mean");
     const auto *running_var = ctx.Input<Tensor>("Variance");
+    const auto *running_mean_data = running_mean->template data<T>();
     const auto *running_var_data = running_var->template data<T>();
+    mean_data = running_mean_data;
     variance_data = running_var_data;
   } else {
     const T *smean_data = Saved_mean->data<T>();
@@ -398,12 +439,12 @@ void NormDoubleGradFunctor(const framework::ExecutionContext &ctx,
     set_constant(dev_ctx, dX, static_cast<T>(0));
     if (use_global_stats) {
       if (data_layout == DataLayout::kNHWC) {
-        DoubleGradComputeDataWithGlobal<
+        DoubleGradComputeDXWithGlobal<
             T, DataLayout::kNHWC><<<grid1, block, 0, dev_ctx.stream()>>>(
             dy_data, ddscale_data, variance_data, epsilon, C, sample_size, num,
             dx_data);
       } else {
-        DoubleGradComputeDataWithGlobal<
+        DoubleGradComputeDXWithGlobal<
             T, DataLayout::kNCHW><<<grid1, block, 0, dev_ctx.stream()>>>(
             dy_data, ddscale_data, variance_data, epsilon, C, sample_size, num,
             dx_data);
@@ -456,15 +497,15 @@ void NormDoubleGradFunctor(const framework::ExecutionContext &ctx,
     set_constant(dev_ctx, ddY, static_cast<T>(0));
     if (use_global_stats) {
       if (data_layout == DataLayout::kNHWC) {
-        DoubleGradComputeDataWithGlobal<
+        DoubleGradComputeDDYWithGlobal<
             T, DataLayout::kNHWC><<<grid1, block, 0, dev_ctx.stream()>>>(
-            ddx_data, scale_data, variance_data, epsilon, C, sample_size, num,
-            ddy_data);
+            ddx_data, scale_data, mean_data, variance_data, x_data, ddbias_data,
+            ddscale_data, epsilon, C, sample_size, num, ddy_data);
       } else {
-        DoubleGradComputeDataWithGlobal<
+        DoubleGradComputeDDYWithGlobal<
             T, DataLayout::kNCHW><<<grid1, block, 0, dev_ctx.stream()>>>(
-            ddx_data, scale_data, variance_data, epsilon, C, sample_size, num,
-            ddy_data);
+            ddx_data, scale_data, mean_data, variance_data, x_data, ddbias_data,
+            ddscale_data, epsilon, C, sample_size, num, ddy_data);
       }
     } else {
       if (data_layout == DataLayout::kNHWC) {
......
@@ -130,5 +130,41 @@ class TestBatchNormDoubleGradCheckCase4(TestBatchNormDoubleGradCheck):
         self.shape = [2, 2, 3, 4, 5]


+class TestBatchNormDoubleGradCheckCase5(TestBatchNormDoubleGradCheck):
+    @prog_scope()
+    def func(self, place):
+        prog = fluid.Program()
+        with fluid.program_guard(prog):
+            np.random.seed()
+            dtype = "float32"
+            eps = 0.005
+            atol = 2e-4
+            chn = self.shape[1] if self.data_layout == 'NCHW' else self.shape[
+                -1]
+            x = layers.create_parameter(dtype=dtype, shape=self.shape, name='x')
+            z = fluid.layers.batch_norm(
+                input=x,
+                data_layout=self.data_layout,
+                use_global_stats=self.use_global_stats)
+            x_arr = np.random.uniform(-1, 1, self.shape).astype(dtype)
+            w, b = prog.global_block().all_parameters()[1:3]
+            w_arr = np.ones(chn).astype(dtype)
+            b_arr = np.zeros(chn).astype(dtype)
+            gradient_checker.double_grad_check(
+                [x, w, b],
+                z,
+                x_init=[x_arr, w_arr, b_arr],
+                atol=atol,
+                place=place,
+                eps=eps)
+
+
+class TestBatchNormDoubleGradCheckCase6(TestBatchNormDoubleGradCheckCase5):
+    def init_test(self):
+        self.data_layout = 'NCHW'
+        self.use_global_stats = True
+        self.shape = [2, 3, 4, 5]
+
+
 if __name__ == "__main__":
     unittest.main()