/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
   http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once
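
// Kernels for the spectral_norm operator. The forward pass normalizes a
// weight tensor by an estimate of its largest singular value (its
// spectral norm), following "Spectral Normalization for Generative
// Adversarial Networks" (Miyato et al., ICLR 2018); the backward pass
// propagates gradients through that normalization.
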
#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
namespace operators {

template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;

using Array1 = Eigen::DSizes<int64_t, 1>;
using Array2 = Eigen::DSizes<int64_t, 2>;
using IndexPair = Eigen::IndexPair<int>;

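// Copies `in` into `out` with its axes permuted according to `perm`. The
// rank is dispatched at runtime because phi::funcs::Transpose takes the
// rank as a compile-time template parameter; only ranks 2 through 5 are
// supported.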
template <typename DeviceContext, typename T>
static inline void TransCompute(const int rank, const Tensor& in, Tensor* out,
                                const std::vector<int>& perm,
                                const DeviceContext& dev_ctx) {
  if (rank <= 1 || rank > 5) {
    PADDLE_THROW(paddle::platform::errors::Fatal(
        "Weight rank of SpectralNorm should be in range [2, 5], but got %d.",
        rank));
  }

  switch (rank) {
    case 2:
      phi::funcs::Transpose<DeviceContext, T, 2> trans2;
      trans2(dev_ctx, in, out, perm);
      break;
    case 3:
      phi::funcs::Transpose<DeviceContext, T, 3> trans3;
      trans3(dev_ctx, in, out, perm);
      break;
    case 4:
      phi::funcs::Transpose<DeviceContext, T, 4> trans4;
      trans4(dev_ctx, in, out, perm);
      break;
    case 5:
      phi::funcs::Transpose<DeviceContext, T, 5> trans5;
      trans5(dev_ctx, in, out, perm);
      break;
    default:
      break;
  }
}

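// Estimates the spectral norm of the h x w matrix `weight` via power
// iteration and normalizes `weight` in place. Each iteration refines the
// dominant singular-vector estimates u (h x 1) and v (w x 1):
//
//   v <- W^T u / (||W^T u||_2 + eps)
//   u <- W v   / (||W v||_2  + eps)
//
// then sigma ~= u^T W v is broadcast to an h x w tensor and the weight is
// normalized as W <- W / sigma.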
template <typename DeviceContext, typename T>
static inline void CalcMatrixSigmaAndNormWeight(
    Tensor* sigma, Tensor* u, Tensor* v, Tensor* weight, const int power_iters,
    const float eps, const framework::ExecutionContext& ctx) {
  auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
  auto blas = phi::funcs::GetBlas<DeviceContext, T>(ctx);
  auto sigma_t = EigenTensor<T, 2>::From(*sigma);
  auto weight_t = EigenTensor<T, 2>::From(*weight);
  auto u_t = EigenTensor<T, 2>::From(*u);
  auto v_t = EigenTensor<T, 2>::From(*v);

  const int h = weight->dims()[0];
  const int w = weight->dims()[1];

  for (int i = 0; i < power_iters; i++) {
    // V = W^T * U / ||W^T * U||_2
    blas.MatMul(*weight, true, *u, false, T(1), v, T(0));
    auto v_t_norm =
        v_t.square().sum().sqrt().eval().reshape(Array1(1)).broadcast(
            Array1(w));
    v_t.device(place) = v_t / (v_t_norm + v_t_norm.constant(eps));
    // U = W * V / ||W * V||_2
    blas.MatMul(*weight, false, *v, false, T(1), u, T(0));
    auto u_t_norm =
        u_t.square().sum().sqrt().eval().reshape(Array1(1)).broadcast(
            Array1(h));
    u_t.device(place) = u_t / (u_t_norm + u_t_norm.constant(eps));
  }
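  // sigma = u^T * (W * v): materialize weight_v = W v (h x 1), contract it
  // elementwise with u, and broadcast the resulting scalar to h x w.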
  Tensor weight_v;
  weight_v.mutable_data<T>({h, 1}, ctx.GetPlace());
  blas.MatMul(*weight, false, *v, false, T(1), &weight_v, T(0));
  auto weight_v_t = EigenTensor<T, 2>::From(weight_v);
  sigma_t.device(place) = (u_t * weight_v_t)
                              .sum()
                              .eval()
                              .reshape(Array2(1, 1))
                              .broadcast(Array2(h, w));
  weight_t.device(place) = weight_t / sigma_t;
}

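// Forward kernel: permutes dimension `dim` of the weight to the front,
// reshapes the result into an {h, w} matrix (h and w come from the leading
// dimensions of the U and V inputs), runs power iteration on copies of U
// and V, divides the matrix by the estimated spectral norm, and restores
// the original layout in Out.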
template <typename DeviceContext, typename T>
class SpectralNormKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto weight = ctx.Input<Tensor>("Weight");
    auto u = ctx.Input<Tensor>("U");
    auto v = ctx.Input<Tensor>("V");
    auto out = ctx.Output<Tensor>("Out");

    int dim = ctx.Attr<int>("dim");
    int power_iters = ctx.Attr<int>("power_iters");
    float eps = ctx.Attr<float>("eps");

    const int h = u->dims()[0];
    const int w = v->dims()[0];

    Tensor weight_mat;
    auto dims = weight->dims();
    const int rank = dims.size();
    std::vector<int> real_dims;
    if (dim != 0) {
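      // Build a permutation that moves axis `dim` to the front and record
      // the permuted shape in real_dims.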
      std::vector<int> perm;
      perm.push_back(dim);
      real_dims.push_back(dims[dim]);
      for (int i = 0; i < rank; i++) {
        if (i != dim) {
          perm.push_back(i);
          real_dims.push_back(dims[i]);
        }
      }
      weight_mat.mutable_data<T>(phi::make_ddim(real_dims), ctx.GetPlace());
      TransCompute<DeviceContext, T>(rank, *weight, &weight_mat, perm, dev_ctx);
    } else {
      for (int i = 0; i < rank; i++) {
        real_dims.push_back(dims[i]);
      }
      paddle::framework::TensorCopySync(*weight, ctx.GetPlace(), &weight_mat);
    }
    weight_mat = weight_mat.Resize({h, w});

    Tensor sigma;
    sigma.mutable_data<T>(weight_mat.dims(), ctx.GetPlace());
    Tensor uu, vv;
    paddle::framework::TensorCopySync(*u, ctx.GetPlace(), &uu);
    paddle::framework::TensorCopySync(*v, ctx.GetPlace(), &vv);
    CalcMatrixSigmaAndNormWeight<DeviceContext, T>(
        &sigma, &(uu.Resize({h, 1})), &(vv.Resize({w, 1})), &weight_mat,
        power_iters, eps, ctx);

    if (dim != 0) {
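      // Apply the inverse permutation to move the leading axis back to
      // position `dim`.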
      std::vector<int> perm;
      for (int i = 0; i < rank; i++) {
        if (i < dim) {
          perm.push_back(i + 1);
        } else if (i == dim) {
          perm.push_back(0);
        } else {
          perm.push_back(i);
        }
      }
      out->mutable_data<T>(dims, ctx.GetPlace());
      TransCompute<DeviceContext, T>(
          rank, weight_mat.Resize(phi::make_ddim(real_dims)), out, perm,
          dev_ctx);
    } else {
      paddle::framework::TensorCopySync(weight_mat.Resize(dims), ctx.GetPlace(),
                                        out);
    }
  }
};

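// Backward kernel: recomputes sigma and the normalized weight
// W_sn = W / sigma exactly as in the forward pass (u and v are treated as
// constants), forms the outer product uv = u * v^T, and evaluates
//
//   dW = dOut * (1 - uv * sum(W_sn)) / sigma
//
// elementwise, with sum(W_sn) broadcast to h x w, before restoring the
// original weight layout.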
template <typename DeviceContext, typename T>
class SpectralNormGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto blas = phi::funcs::GetBlas<DeviceContext, T>(ctx);
    auto weight = ctx.Input<Tensor>("Weight");
    auto u = ctx.Input<Tensor>("U");
    auto v = ctx.Input<Tensor>("V");
    auto out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto weight_grad = ctx.Output<Tensor>(framework::GradVarName("Weight"));

    int dim = ctx.Attr<int>("dim");
    int power_iters = ctx.Attr<int>("power_iters");
    float eps = ctx.Attr<float>("eps");

    const int h = u->dims()[0];
    const int w = v->dims()[0];

    Tensor weight_mat, out_grad_mat;
    auto dims = weight->dims();
    const int rank = dims.size();
    std::vector<int> real_dims;
    if (dim != 0) {
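      // Same front-permutation as the forward kernel, applied to both the
      // weight and the incoming output gradient.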
      std::vector<int> perm;
      perm.push_back(dim);
      real_dims.push_back(dims[dim]);
      for (int i = 0; i < rank; i++) {
        if (i != dim) {
          perm.push_back(i);
          real_dims.push_back(dims[i]);
        }
      }
      weight_mat.mutable_data<T>(phi::make_ddim(real_dims), ctx.GetPlace());
      out_grad_mat.mutable_data<T>(phi::make_ddim(real_dims), ctx.GetPlace());
      TransCompute<DeviceContext, T>(rank, *weight, &weight_mat, perm, dev_ctx);
      TransCompute<DeviceContext, T>(rank, *out_grad, &out_grad_mat, perm,
                                     dev_ctx);
    } else {
      for (int i = 0; i < rank; i++) {
        real_dims.push_back(dims[i]);
      }
      paddle::framework::TensorCopySync(*weight, ctx.GetPlace(), &weight_mat);
      paddle::framework::TensorCopySync(*out_grad, ctx.GetPlace(),
                                        &out_grad_mat);
    }
    weight_mat = weight_mat.Resize({h, w});
    out_grad_mat = out_grad_mat.Resize({h, w});

    Tensor sigma;
    sigma.mutable_data<T>(weight_mat.dims(), ctx.GetPlace());
    Tensor uu, vv;
    paddle::framework::TensorCopySync(*u, ctx.GetPlace(), &uu);
    paddle::framework::TensorCopySync(*v, ctx.GetPlace(), &vv);
    CalcMatrixSigmaAndNormWeight<DeviceContext, T>(
        &sigma, &(uu.Resize({h, 1})), &(vv.Resize({w, 1})), &weight_mat,
        power_iters, eps, ctx);

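    // uv = u * v^T: an h x w rank-one matrix used by the gradient below.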
    Tensor uv;
    uv.mutable_data<T>({h, w}, ctx.GetPlace());
    blas.MatMul(uu.Resize({h, 1}), false, vv.Resize({w, 1}), false, T(1), &uv,
                T(0));

    Tensor weight_grad_mat;
    weight_grad_mat.mutable_data<T>({h, w}, ctx.GetPlace());
    auto weight_grad_mat_t = EigenTensor<T, 2>::From(weight_grad_mat);
    auto weight_mat_t = EigenTensor<T, 2>::From(weight_mat);
    auto out_grad_mat_t = EigenTensor<T, 2>::From(out_grad_mat);
    auto sigma_t = EigenTensor<T, 2>::From(sigma);
    auto uv_t = EigenTensor<T, 2>::From(uv);
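    // Overwrite weight_mat_t (currently W_sn) with its elementwise sum,
    // broadcast back to an h x w tensor, then form the gradient.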
    weight_mat_t.device(place) =
        weight_mat_t.sum().eval().reshape(Array2(1, 1)).broadcast(Array2(h, w));
    weight_grad_mat_t.device(place) =
        out_grad_mat_t * (out_grad_mat_t.constant(1.0) - uv_t * weight_mat_t) /
        sigma_t;

    if (dim != 0) {
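      // Undo the front-permutation so the gradient matches the original
      // weight layout.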
      std::vector<int> perm;
      for (int i = 0; i < rank; i++) {
        if (i < dim) {
          perm.push_back(i + 1);
        } else if (i == dim) {
          perm.push_back(0);
        } else {
          perm.push_back(i);
        }
      }
      weight_grad->mutable_data<T>(dims, ctx.GetPlace());
      TransCompute<DeviceContext, T>(
          rank, weight_grad_mat.Resize(phi::make_ddim(real_dims)), weight_grad,
          perm, dev_ctx);
    } else {
      paddle::framework::TensorCopySync(weight_grad_mat.Resize(dims),
                                        ctx.GetPlace(), weight_grad);
    }
  }
};

}  // namespace operators
}  // namespace paddle
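
// A minimal sketch of how these kernels are typically registered. The exact
// registration lives in spectral_norm_op.cc / spectral_norm_op.cu; the
// device contexts and dtypes below follow the usual Paddle pattern and are
// an illustrative assumption, not a verbatim copy:
//
//   namespace ops = paddle::operators;
//   REGISTER_OP_CPU_KERNEL(
//       spectral_norm,
//       ops::SpectralNormKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::SpectralNormKernel<paddle::platform::CPUDeviceContext, double>);
//   REGISTER_OP_CPU_KERNEL(
//       spectral_norm_grad,
//       ops::SpectralNormGradKernel<paddle::platform::CPUDeviceContext,
//                                   float>,
//       ops::SpectralNormGradKernel<paddle::platform::CPUDeviceContext,
//                                   double>);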