/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */
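
// Implements the SpectralNorm operator. The forward pass rescales the
// weight by an estimate of its largest singular value:
//     Out = Weight / sigma(Weight),  sigma(W) ~= u^T * W * v,
// where u and v are refined with `power_iters` rounds of power iteration.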

#pragma once
#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
namespace operators {

template <typename T,
          size_t D,
          int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;

using Array1 = Eigen::DSizes<int64_t, 1>;
using Array2 = Eigen::DSizes<int64_t, 2>;
using IndexPair = Eigen::IndexPair<int>;

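// Copies `in` into `out` with its axes permuted according to `perm`.
// phi::funcs::Transpose is templated on rank, so the run-time rank
// (2 through 5) is dispatched with the switch below.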
template <typename DeviceContext, typename T>
static inline void TransCompute(const int rank,
                                const Tensor& in,
                                Tensor* out,
                                const std::vector<int>& perm,
                                const DeviceContext& dev_ctx) {
  if (rank <= 1 || rank > 5) {
    PADDLE_THROW(paddle::platform::errors::Fatal(
        "Weight rank of SpectralNorm should be in range [2, 5], but got %d.",
        rank));
  }

  switch (rank) {
    case 2:
      phi::funcs::Transpose<DeviceContext, T, 2> trans2;
      trans2(dev_ctx, in, out, perm);
      break;
    case 3:
      phi::funcs::Transpose<DeviceContext, T, 3> trans3;
      trans3(dev_ctx, in, out, perm);
      break;
    case 4:
      phi::funcs::Transpose<DeviceContext, T, 4> trans4;
      trans4(dev_ctx, in, out, perm);
      break;
    case 5:
      phi::funcs::Transpose<DeviceContext, T, 5> trans5;
      trans5(dev_ctx, in, out, perm);
      break;
    default:
      break;
  }
}

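// Runs `power_iters` rounds of power iteration to refine the singular-vector
// estimates u ({h, 1}) and v ({w, 1}) of `weight` ({h, w}), computes
// sigma ~= u^T * W * v, and normalizes `weight` in place to W / sigma.
// sigma, u, v, and weight are all written in place.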
template <typename DeviceContext, typename T>
static inline void CalcMatrixSigmaAndNormWeight(
    Tensor* sigma,
    Tensor* u,
    Tensor* v,
    Tensor* weight,
    const int power_iters,
    const float eps,
    const framework::ExecutionContext& ctx) {
  auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
  auto blas = phi::funcs::GetBlas<DeviceContext, T>(ctx);
  auto sigma_t = EigenTensor<T, 2>::From(*sigma);
  auto weight_t = EigenTensor<T, 2>::From(*weight);
  auto u_t = EigenTensor<T, 2>::From(*u);
  auto v_t = EigenTensor<T, 2>::From(*v);

  const int h = weight->dims()[0];
  const int w = weight->dims()[1];

  for (int i = 0; i < power_iters; i++) {
    // V = W^T * U / ||W^T * U||_2
    blas.MatMul(*weight, true, *u, false, T(1), v, T(0));
    auto v_t_norm =
        v_t.square().sum().sqrt().eval().reshape(Array1(1)).broadcast(
            Array1(w));
    v_t.device(place) = v_t / (v_t_norm + v_t_norm.constant(eps));
    // U = W * V / ||W * V||_2
    blas.MatMul(*weight, false, *v, false, T(1), u, T(0));
    auto u_t_norm =
        u_t.square().sum().sqrt().eval().reshape(Array1(1)).broadcast(
            Array1(h));
    u_t.device(place) = u_t / (u_t_norm + u_t_norm.constant(eps));
  }
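  // sigma = u^T * (W * v): the elementwise product of u and (W * v) summed
  // over all entries, then broadcast back to the {h, w} weight shape.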
  Tensor weight_v;
  weight_v.mutable_data<T>({h, 1}, ctx.GetPlace());
  blas.MatMul(*weight, false, *v, false, T(1), &weight_v, T(0));
  auto weight_v_t = EigenTensor<T, 2>::From(weight_v);
  sigma_t.device(place) = (u_t * weight_v_t)
                              .sum()
                              .eval()
                              .reshape(Array2(1, 1))
                              .broadcast(Array2(h, w));
  weight_t.device(place) = weight_t / sigma_t;
}

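// Forward kernel: flattens Weight to a 2-D {h, w} matrix, estimates its
// spectral norm sigma with power iteration, and writes Weight / sigma to Out.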
template <typename DeviceContext, typename T>
class SpectralNormKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto weight = ctx.Input<Tensor>("Weight");
    auto u = ctx.Input<Tensor>("U");
    auto v = ctx.Input<Tensor>("V");
    auto out = ctx.Output<Tensor>("Out");

    int dim = ctx.Attr<int>("dim");
    int power_iters = ctx.Attr<int>("power_iters");
    float eps = ctx.Attr<float>("eps");

    const int h = u->dims()[0];
    const int w = v->dims()[0];

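    // Flatten the weight into a 2-D matrix {h, w}. When dim != 0, the
    // normalized dimension is first permuted to the front, so h = dims[dim]
    // and w is the product of the remaining dimensions.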
    Tensor weight_mat;
    auto dims = weight->dims();
    const int rank = dims.size();
    std::vector<int> real_dims;
    if (dim != 0) {
      std::vector<int> perm;
      perm.push_back(dim);
      real_dims.push_back(dims[dim]);
      for (int i = 0; i < rank; i++) {
        if (i != dim) {
          perm.push_back(i);
          real_dims.push_back(dims[i]);
        }
      }
      weight_mat.mutable_data<T>(phi::make_ddim(real_dims), ctx.GetPlace());
      TransCompute<DeviceContext, T>(rank, *weight, &weight_mat, perm, dev_ctx);
    } else {
      for (int i = 0; i < rank; i++) {
        real_dims.push_back(dims[i]);
      }
      paddle::framework::TensorCopySync(*weight, ctx.GetPlace(), &weight_mat);
    }
    weight_mat = weight_mat.Resize({h, w});

    Tensor sigma;
    sigma.mutable_data<T>(weight_mat.dims(), ctx.GetPlace());
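    // Power iteration updates u and v in place, so work on copies of the
    // U and V inputs.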
    Tensor uu, vv;
    paddle::framework::TensorCopySync(*u, ctx.GetPlace(), &uu);
    paddle::framework::TensorCopySync(*v, ctx.GetPlace(), &vv);
    CalcMatrixSigmaAndNormWeight<DeviceContext, T>(&sigma,
                                                   &(uu.Resize({h, 1})),
                                                   &(vv.Resize({w, 1})),
                                                   &weight_mat,
                                                   power_iters,
                                                   eps,
                                                   ctx);

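    // Restore the original layout: permute the flattened, normalized weight
    // back so that Out has the same shape as Weight.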
    if (dim != 0) {
      std::vector<int> perm;
      for (int i = 0; i < rank; i++) {
        if (i < dim) {
          perm.push_back(i + 1);
        } else if (i == dim) {
          perm.push_back(0);
        } else {
          perm.push_back(i);
        }
      }
      out->mutable_data<T>(dims, ctx.GetPlace());
      TransCompute<DeviceContext, T>(
          rank,
          weight_mat.Resize(phi::make_ddim(real_dims)),
          out,
          perm,
          dev_ctx);
    } else {
      paddle::framework::TensorCopySync(
          weight_mat.Resize(dims), ctx.GetPlace(), out);
    }
  }
};

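// Gradient kernel: re-runs power iteration to recover sigma, u, and v, then
// backpropagates through Out = Weight / sigma(Weight).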
template <typename DeviceContext, typename T>
class SpectralNormGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto blas = phi::funcs::GetBlas<DeviceContext, T>(ctx);
    auto weight = ctx.Input<Tensor>("Weight");
    auto u = ctx.Input<Tensor>("U");
    auto v = ctx.Input<Tensor>("V");
    auto out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto weight_grad = ctx.Output<Tensor>(framework::GradVarName("Weight"));

    int dim = ctx.Attr<int>("dim");
    int power_iters = ctx.Attr<int>("power_iters");
    float eps = ctx.Attr<float>("eps");

    const int h = u->dims()[0];
    const int w = v->dims()[0];

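    // Flatten both the weight and the output gradient to {h, w}, permuting
    // dim to the front first when dim != 0 (mirroring the forward kernel).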
    Tensor weight_mat, out_grad_mat;
    auto dims = weight->dims();
    const int rank = dims.size();
    std::vector<int> real_dims;
    if (dim != 0) {
      std::vector<int> perm;
      perm.push_back(dim);
      real_dims.push_back(dims[dim]);
      for (int i = 0; i < rank; i++) {
        if (i != dim) {
          perm.push_back(i);
          real_dims.push_back(dims[i]);
        }
      }
      weight_mat.mutable_data<T>(phi::make_ddim(real_dims), ctx.GetPlace());
      out_grad_mat.mutable_data<T>(phi::make_ddim(real_dims), ctx.GetPlace());
      TransCompute<DeviceContext, T>(rank, *weight, &weight_mat, perm, dev_ctx);
      TransCompute<DeviceContext, T>(
          rank, *out_grad, &out_grad_mat, perm, dev_ctx);
    } else {
      for (int i = 0; i < rank; i++) {
        real_dims.push_back(dims[i]);
      }
      paddle::framework::TensorCopySync(*weight, ctx.GetPlace(), &weight_mat);
      paddle::framework::TensorCopySync(
          *out_grad, ctx.GetPlace(), &out_grad_mat);
    }
    weight_mat = weight_mat.Resize({h, w});
    out_grad_mat = out_grad_mat.Resize({h, w});

    Tensor sigma;
    sigma.mutable_data<T>(weight_mat.dims(), ctx.GetPlace());
    Tensor uu, vv;
    paddle::framework::TensorCopySync(*u, ctx.GetPlace(), &uu);
    paddle::framework::TensorCopySync(*v, ctx.GetPlace(), &vv);
    CalcMatrixSigmaAndNormWeight<DeviceContext, T>(&sigma,
                                                   &(uu.Resize({h, 1})),
                                                   &(vv.Resize({w, 1})),
                                                   &weight_mat,
                                                   power_iters,
                                                   eps,
                                                   ctx);

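    // uv ~= u * v^T, the (approximate) gradient of sigma = u^T * W * v with
    // respect to W.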
    Tensor uv;
    uv.mutable_data<T>({h, w}, ctx.GetPlace());
    blas.MatMul(
        uu.Resize({h, 1}), false, vv.Resize({w, 1}), false, T(1), &uv, T(0));

    Tensor weight_grad_mat;
    weight_grad_mat.mutable_data<T>({h, w}, ctx.GetPlace());
    auto weight_grad_mat_t = EigenTensor<T, 2>::From(weight_grad_mat);
    auto weight_mat_t = EigenTensor<T, 2>::From(weight_mat);
    auto out_grad_mat_t = EigenTensor<T, 2>::From(out_grad_mat);
    auto sigma_t = EigenTensor<T, 2>::From(sigma);
    auto uv_t = EigenTensor<T, 2>::From(uv);
    // dL/dW = (dL/dOut - sum(dL/dOut * W_sn) * u * v^T) / sigma, where
    // W_sn = W / sigma is the normalized weight left in weight_mat by
    // CalcMatrixSigmaAndNormWeight.
    weight_grad_mat_t.device(place) =
        (out_grad_mat_t - (out_grad_mat_t * weight_mat_t)
                                  .sum()
                                  .eval()
                                  .reshape(Array2(1, 1))
                                  .broadcast(Array2(h, w)) *
                              uv_t) /
        sigma_t;

    if (dim != 0) {
      std::vector<int> perm;
      for (int i = 0; i < rank; i++) {
        if (i < dim) {
          perm.push_back(i + 1);
        } else if (i == dim) {
          perm.push_back(0);
        } else {
          perm.push_back(i);
        }
      }
      weight_grad->mutable_data<T>(dims, ctx.GetPlace());
      TransCompute<DeviceContext, T>(
          rank,
          weight_grad_mat.Resize(phi::make_ddim(real_dims)),
          weight_grad,
          perm,
          dev_ctx);
    } else {
      paddle::framework::TensorCopySync(
          weight_grad_mat.Resize(dims), ctx.GetPlace(), weight_grad);
    }
  }
};

}  // namespace operators
}  // namespace paddle