From 1f2afccf300d7a9dcf713db2708456e8d2ff460c Mon Sep 17 00:00:00 2001
From: SunGaofeng
Date: Fri, 12 Apr 2019 19:33:35 +0800
Subject: [PATCH] test=develop (#16783)

---
 paddle/fluid/operators/affine_grid_op.h | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/operators/affine_grid_op.h b/paddle/fluid/operators/affine_grid_op.h
index 87d238314..73df8a38b 100644
--- a/paddle/fluid/operators/affine_grid_op.h
+++ b/paddle/fluid/operators/affine_grid_op.h
@@ -121,9 +121,11 @@ class AffineGridOpKernel : public framework::OpKernel<T> {
     // TODO(wanghaoshuang): Refine batched matrix multiply
     auto blas = math::GetBlas<DeviceContext, T>(ctx);
     for (int i = 0; i < n; ++i) {
-      Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3});
+      Tensor sliced_grid = grid.Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 3});
       Tensor sliced_theta = theta->Slice(i, i + 1).Resize({2, 3});
-      Tensor sliced_out = output->Slice(i, i + 1).Resize({h * w, 2});
+      Tensor sliced_out = output->Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 2});
       blas.MatMul(sliced_grid, false, sliced_theta, true, T(1), &sliced_out,
                   T(0));
     }
@@ -161,8 +163,10 @@ class AffineGridGradOpKernel : public framework::OpKernel<T> {
     // TODO(wanghaoshuang): Refine batched matrix multiply
     auto blas = math::GetBlas<DeviceContext, T>(ctx);
     for (int i = 0; i < n; ++i) {
-      Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3});
-      Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize({h * w, 2});
+      Tensor sliced_grid = grid.Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 3});
+      Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 2});
       Tensor sliced_theta_grad = theta_grad->Slice(i, i + 1).Resize({2, 3});
       blas.MatMul(sliced_out_grad, true, sliced_grid, false, T(1),
                   &sliced_theta_grad, T(0));
--
GitLab
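
The change appears to widen the `h * w` product to `int64_t` before building the `Resize` dims, presumably so the multiplication cannot overflow 32-bit `int` for very large feature maps. A minimal standalone sketch (not part of the patch; the dimension values are hypothetical) of why the cast is applied to the operands rather than to the finished product:

```cpp
#include <cstdint>
#include <iostream>

int main() {
  // Hypothetical spatial dims large enough that h * w exceeds INT32_MAX.
  int h = 50000;
  int w = 50000;

  // static_cast<int64_t>(h * w) would NOT help: the multiplication would
  // still be evaluated in 32-bit int and overflow before the widening
  // conversion. Casting an operand first promotes the whole multiplication
  // to 64 bits, which is the pattern the patch uses inside Resize({...}).
  int64_t hw = static_cast<int64_t>(h) * static_cast<int64_t>(w);

  std::cout << hw << std::endl;  // prints 2500000000
  return 0;
}
```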