squared_l2_distance_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

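// Forward kernel: flattens X and Y to 2-D (batch_size x cols), buffers the
// element-wise difference X - Y in "sub_result", and writes the row-wise sum
// of squares, i.e. Out[i] = sum_j (X[i][j] - Y[i][j])^2, into "Out".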
template <typename Place, typename T>
class SquaredL2DistanceKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in0 = context.Input<Tensor>("X");
    auto* in1 = context.Input<Tensor>("Y");
    auto* out0 = context.Output<Tensor>("sub_result");
    auto* out1 = context.Output<Tensor>("Out");

    auto in0_dims = in0->dims();
    auto in1_dims = in1->dims();

    int cols = framework::product(in0_dims) / in0_dims[0];
    // reduce dimensions except the first
    auto x =
        EigenMatrix<T>::From(*in0, framework::make_ddim({in0_dims[0], cols}));
    auto y =
        EigenMatrix<T>::From(*in1, framework::make_ddim({in1_dims[0], cols}));

    out0->mutable_data<T>(context.GetPlace());
    out1->mutable_data<T>(context.GetPlace());
    auto sub_result = EigenMatrix<T>::From(*out0);
    auto z = EigenMatrix<T>::From(*out1);

    auto place = context.GetEigenDevice<Place>();
    auto x_dims = x.dimensions();
    auto y_dims = y.dimensions();
    // buffer the subtraction result
    if (y_dims[0] == 1 && x_dims[0] > y_dims[0]) {
      sub_result.device(place) =
          x -
          y.broadcast(Eigen::array<int, 2>({static_cast<int>(x_dims[0]), 1}));
    } else {
      sub_result.device(place) = x - y;
    }
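    // Square the buffered difference, reduce over the column axis, and
    // reshape the per-row sums into a (batch_size x 1) column written to Out.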
    auto sub_res_pow2 = sub_result * sub_result;
    z.device(place) =
        sub_res_pow2.sum(Eigen::array<int, 1>({1}))
            .reshape(Eigen::array<int, 2>({static_cast<int>(x_dims[0]), 1}));
  }
};

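// Backward kernel: given dOut and the buffered difference X - Y, computes
// dX = 2 * dOut * (X - Y) and dY = -2 * dOut * (X - Y), reducing dY over the
// batch axis when Y was broadcast in the forward pass.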
template <typename Place, typename T>
class SquaredL2DistanceGradKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in0 = context.Input<Tensor>("sub_result");
    auto* in1 = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* x_g = context.Output<Tensor>(framework::GradVarName("X"));
    auto* y_g = context.Output<Tensor>(framework::GradVarName("Y"));

    auto sub_result = EigenMatrix<T>::From(*in0);
    auto out_grad = EigenMatrix<T>::From(*in1);

    auto x_dims = x_g->dims();
    auto y_dims = y_g->dims();

    int cols = framework::product(x_dims) / x_dims[0];
    // calculate gradient
    auto grad_mat =
        2 * (out_grad.broadcast(Eigen::array<int, 2>({1, cols}))) * sub_result;

    // propagate back to input
    auto eigen_place = context.GetEigenDevice<Place>();
    if (x_g) {
      x_g->mutable_data<T>(context.GetPlace());
      // eigen matrix
      auto x_grad =
          EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
      // dimensions are the same as sub_result
      x_grad.device(eigen_place) = grad_mat;
    }

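    // If Y was broadcast in the forward pass (first dimension 1), its gradient
    // is the column-wise sum of -grad_mat; otherwise it is simply -grad_mat.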
    if (y_g) {
      y_g->mutable_data<T>(context.GetPlace());
      auto y_grad =
          EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));

      PADDLE_ENFORCE(sub_result.dimensions()[0] >= y_dims[0],
                     "First dimension of gradient must be greater than or "
                     "equal to the first dimension of target.");

      if (sub_result.dimensions()[0] == y_dims[0]) {
        y_grad.device(eigen_place) = -1 * grad_mat;
      } else {
        auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({0})));
        y_grad.device(eigen_place) =
            col_sum_res.reshape(Eigen::array<int, 2>({1, cols}));
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle